tcp_timewait.c revision 34923
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 *	$Id: tcp_subr.c,v 1.43 1998/03/24 18:06:28 wollman Exp $
 */

#include "opt_compat.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <vm/vm_zone.h>

#include <net/route.h>
#include <net/if.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
	CTLFLAG_RW, &tcp_mssdflt, 0, "");

static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt,
	CTLFLAG_RW, &tcp_rttdflt, 0, "");

static int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323,
	CTLFLAG_RW, &tcp_do_rfc1323, 0, "");

static int	tcp_do_rfc1644 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644,
	CTLFLAG_RW, &tcp_do_rfc1644, 0, "");
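
/*
 * All four of the defaults above are exported as read-write sysctl
 * knobs under net.inet.tcp (mssdflt, rttdflt, rfc1323 and rfc1644),
 * so they can be inspected and tuned at run time, e.g.:
 *
 *	sysctl -w net.inet.tcp.rfc1323=0
 */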

static void	tcp_cleartaocache __P((void));
static void	tcp_notify __P((struct inpcb *, int));

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNMENT	32
#define ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
};
#undef ALIGNMENT
#undef ALIGNM1

/*
 * Tcp initialization
 */
void
tcp_init()
{

	tcp_iss = random();	/* wrong, but better than a constant */
	tcp_ccgen = 1;
	tcp_cleartaocache();
	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	tcbinfo.hashbase = hashinit(TCBHASHSIZE, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(TCBHASHSIZE, M_PCB,
					&tcbinfo.porthashmask);
	/* For the moment, we just worry about putting inpcbs here. */
	/*
	 * Rationale for a maximum of `nmbclusters':
	 *	1) It's a convenient value, sized by config, based on
	 *	   parameters already known to be tweakable as needed
	 *	   for network-intensive systems.
	 *	2) Under the Old World Order, when pcbs were stored in
	 *	   mbufs, it was of course impossible to have more
	 *	   pcbs than mbufs.
	 *	3) The zone allocator doesn't allocate physical memory
	 *	   for this many pcbs; it just sizes the virtual
	 *	   address space appropriately.  Thus, even for very large
	 *	   values of nmbclusters, we don't actually take up much
	 *	   memory unless required.
	 */
	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), nmbclusters,
				 ZONE_INTERRUPT, 0);
	if (max_protohdr < sizeof(struct tcpiphdr))
		max_protohdr = sizeof(struct tcpiphdr);
	if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
		panic("tcp_init");
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, allocates an mbuf and fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 */
struct tcpiphdr *
tcp_template(tp)
	struct tcpcb *tp;
{
	register struct inpcb *inp = tp->t_inpcb;
	register struct mbuf *m;
	register struct tcpiphdr *n;

	if ((n = tp->t_template) == 0) {
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return (0);
		m->m_len = sizeof (struct tcpiphdr);
		n = mtod(m, struct tcpiphdr *);
	}
	n->ti_next = n->ti_prev = 0;
	n->ti_x1 = 0;
	n->ti_pr = IPPROTO_TCP;
	n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
	n->ti_src = inp->inp_laddr;
	n->ti_dst = inp->inp_faddr;
	n->ti_sport = inp->inp_lport;
	n->ti_dport = inp->inp_fport;
	n->ti_seq = 0;
	n->ti_ack = 0;
	n->ti_x2 = 0;
	n->ti_off = 5;
	n->ti_flags = 0;
	n->ti_win = 0;
	n->ti_sum = 0;
	n->ti_urp = 0;
	return (n);
}
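
/*
 * The template built by tcp_template() is what the keepalive timer
 * hands to tcp_respond() below; the call in tcp_timer.c looks roughly
 * like
 *
 *	tcp_respond(tp, tp->t_template, (struct mbuf *)NULL,
 *	    tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * i.e. a bare segment carrying an already-acknowledged sequence
 * number, which prods the peer into answering with an ACK.
 */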

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ti, m, ack, seq, flags)
	struct tcpcb *tp;
	register struct tcpiphdr *ti;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;

	if (tp) {
		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
		ro = &tp->t_inpcb->inp_route;
	} else {
		ro = &sro;
		bzero(ro, sizeof *ro);
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
#ifdef TCP_COMPAT_42
		tlen = 1;
#else
		tlen = 0;
#endif
		m->m_data += max_linkhdr;
		*mtod(m, struct tcpiphdr *) = *ti;
		ti = mtod(m, struct tcpiphdr *);
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ti;
		m->m_len = sizeof (struct tcpiphdr);
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
		xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_long);
		xchg(ti->ti_dport, ti->ti_sport, u_short);
#undef xchg
	}
	ti->ti_len = htons((u_short)(sizeof (struct tcphdr) + tlen));
	tlen += sizeof (struct tcpiphdr);
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	ti->ti_next = ti->ti_prev = 0;
	ti->ti_x1 = 0;
	ti->ti_seq = htonl(seq);
	ti->ti_ack = htonl(ack);
	ti->ti_x2 = 0;
	ti->ti_off = sizeof (struct tcphdr) >> 2;
	ti->ti_flags = flags;
	if (tp)
		ti->ti_win = htons((u_short) (win >> tp->rcv_scale));
	else
		ti->ti_win = htons((u_short)win);
	ti->ti_urp = 0;
	ti->ti_sum = 0;
	ti->ti_sum = in_cksum(m, tlen);
	((struct ip *)ti)->ip_len = tlen;
	((struct ip *)ti)->ip_ttl = ip_defttl;
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, ti, 0);
#endif
	(void) ip_output(m, NULL, ro, 0, NULL);
	if (ro == &sro && ro->ro_rt) {
		RTFREE(ro->ro_rt);
	}
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct inp_tp *it;
	register struct tcpcb *tp;

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero((char *) tp, sizeof(struct tcpcb));
	tp->seg_next = tp->seg_prev = (struct tcpiphdr *)tp;
	tp->t_maxseg = tp->t_maxopd = tcp_mssdflt;

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = TCPTV_MIN;
	tp->t_rxtcur = TCPTV_RTOBASE;
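	/*
	 * A worked example of the initialization above, assuming the
	 * usual values TCPTV_SRTTBASE == 0 and TCPTV_RTOBASE == 3 seconds
	 * of slow-timeout ticks: t_rttvar is seeded with (3 s / 4) in its
	 * scaled fixed-point form, so srtt + 4 * rttvar comes out to
	 * 0 + 4 * (3 s / 4) == 3 s, exactly the t_rxtcur we start with.
	 * These synthetic values are replaced as soon as tcp_xmit_timer()
	 * sees real round-trip samples.
	 */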
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	register struct tcpcb *tp;
{
	register struct tcpiphdr *t;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	register struct mbuf *m;
	register struct rtentry *rt;
	int dosavessthresh;

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16 &&
	    (rt = inp->inp_route.ro_rt) &&
	    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
		register u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
#if 1
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
#else
		dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
#endif
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
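	/*
	 * To put numbers on the conversion just above: an ssthresh of
	 * 8760 user-data bytes with a t_maxseg of 1460 gives
	 * (8760 + 730) / 1460 == 6 packets, stored as
	 * 6 * (1460 + 40) == 9000 bytes of packet data (assuming the
	 * usual 40-byte tcpiphdr).  tcp_mss() in tcp_input.c consults
	 * the cached rmx_ssthresh when a later connection to the same
	 * destination starts up.
	 */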
	/* free the reassembly queue, if any */
	t = tp->seg_next;
	while (t != (struct tcpiphdr *)tp) {
		t = (struct tcpiphdr *)t->ti_next;
		m = REASS_MBUF((struct tcpiphdr *)t->ti_prev);
		remque(t->ti_prev);
		m_freem(m);
	}
	if (tp->t_template)
		(void) m_free(dtom(tp->t_template));
	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{

}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
static void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	     (error == EHOSTUNREACH || error == ENETUNREACH ||
	      error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	register struct ip *ip = vip;
	register struct tcphdr *th;
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
#if 1
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
#endif
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
		return;
	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip
				       + (IP_VHL_HL(ip->ip_vhl) << 2));
		in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
			cmd, notify);
	} else
		in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
}
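
/*
 * For context: ICMP errors reach tcp_ctlinput() through the protocol
 * switch.  icmp_input() maps the ICMP type/code onto one of the PRC_
 * values and then dispatches roughly as follows:
 *
 *	ctlfunc = inetsw[ip_protox[icp->icmp_ip.ip_p]].pr_ctlinput;
 *	if (ctlfunc)
 *		(*ctlfunc)(code, (struct sockaddr *)&icmpsrc,
 *		    (void *)&icp->icmp_ip);
 *
 * so the `vip' argument above points at the IP header quoted inside
 * the ICMP error, which is how tcp_ctlinput() recovers the ports of
 * the offending connection.
 */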

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

#if 1
/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;

	if (tp) {
		rt = tcp_rtlookup(inp);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtt = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}
#endif
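
/*
 * A worked example of the clamping in tcp_mtudisc() above: if the
 * route's rmx_mtu has been lowered to 4352 (an FDDI-sized path MTU),
 * the raw mss becomes 4352 - 40 == 4312 (assuming the usual 40-byte
 * tcpiphdr); with RFC 1323 timestamps in use another
 * TCPOLEN_TSTAMP_APPA (12) bytes come off, giving 4300, and since
 * that exceeds the (typically 2048-byte) MCLBYTES it is rounded down
 * to 4096 so that full-sized segments fill whole mbuf clusters.
 */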

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inp)
	struct inpcb *inp;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inp->inp_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(ro->ro_dst);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inp->inp_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
	struct inpcb *inp;
{
	struct rtentry *rt = tcp_rtlookup(inp);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
 * nothing in the cache left over.
 */
static void
tcp_cleartaocache()
{
}