/* tcp_input.c revision 7417 */
1/* 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * From: @(#)tcp_input.c	8.5 (Berkeley) 4/10/94
 *	$Id: tcp_input.c,v 1.16 1995/03/16 18:15:04 bde Exp $
 */

#ifndef TUBA_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
/* Copy of the last segment's header, saved for tcp_trace() under SO_DEBUG. */
struct	tcpiphdr tcp_saveti;
#endif

/*
 * Duplicate-ACK threshold; presumably the trigger for fast retransmit
 * (the code using it is outside this chunk) -- TODO confirm.
 */
int	tcprexmtthresh = 3;

/*
 * One-entry PCB lookup cache: tcp_input() checks this inpcb first and
 * falls back to in_pcblookup() (bumping tcps_pcbcachemiss) on mismatch.
 */
struct	inpcb *tcp_last_inpcb = &tcb;

/* Next initial send sequence number; advanced by TCP_ISSINCR/2 per use. */
tcp_seq	tcp_iss;

/* T/TCP (RFC 1644) connection-count generator, advanced via CC_INC(). */
tcp_cc	tcp_ccgen;

/* Head of the list of TCP protocol control blocks. */
struct	inpcb tcb;

/* TCP statistics, updated throughout input processing. */
struct	tcpstat tcpstat;

/*
 * Coarse time counter used for RFC 1323 timestamp values and aging
 * of ts_recent (compared against TCP_PAWS_IDLE).
 */
u_long	tcp_now;

#endif /* TUBA_INCLUDE */

/*
 * Insert segment ti into reassembly queue of tcp with
 * control block tp.  Return TH_FIN if reassembly now includes
 * a segment with FIN.  The macro form does the common case inline
 * (segment is the next to be received on an established connection,
 * and the queue is empty), avoiding linkage into and removal
 * from the queue and repetition of various conversions.
 * Set DELACK for segments received in order, but ack immediately
 * when segments are out of order (so fast retransmit can work).
 */
#define	TCP_REASS(tp, ti, m, so, flags) { \
	if ((ti)->ti_seq == (tp)->rcv_nxt && \
	    (tp)->seg_next == (struct tcpiphdr *)(tp) && \
	    (tp)->t_state == TCPS_ESTABLISHED) { \
		/* In-order segment, empty queue: deliver directly. */ \
		tp->t_flags |= TF_DELACK; \
		(tp)->rcv_nxt += (ti)->ti_len; \
		flags = (ti)->ti_flags & TH_FIN; \
		tcpstat.tcps_rcvpack++;\
		tcpstat.tcps_rcvbyte += (ti)->ti_len;\
		sbappend(&(so)->so_rcv, (m)); \
		sorwakeup(so); \
	} else { \
		/* Out of order (or not yet established): queue it and */ \
		/* ack immediately so the peer's fast retransmit works. */ \
		(flags) = tcp_reass((tp), (ti), (m)); \
		tp->t_flags |= TF_ACKNOW; \
	} \
}
#ifndef TUBA_INCLUDE

/*
 * Slow path of TCP reassembly (see TCP_REASS above for the contract):
 * link segment ti (data held in mbuf chain m) into tp's sequence-ordered
 * reassembly queue, trimming any overlap with already-queued segments,
 * then deliver whatever contiguous data now sits at rcv_nxt to the
 * socket buffer.  Returns TH_FIN if the delivered data included a FIN,
 * otherwise 0.
 */
int
tcp_reass(tp, ti, m)
	register struct tcpcb *tp;
	register struct tcpiphdr *ti;
	struct mbuf *m;
{
	register struct tcpiphdr *q;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	/*
	 * Call with ti==0 after become established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (ti == 0)
		goto present;

	/*
	 * Find a segment which begins after this one does.
	 * (The queue is circular; (struct tcpiphdr *)tp is the head
	 * sentinel, so reaching it means no such segment exists.)
	 */
	for (q = tp->seg_next; q != (struct tcpiphdr *)tp;
	    q = (struct tcpiphdr *)q->ti_next)
		if (SEQ_GT(q->ti_seq, ti->ti_seq))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if ((struct tcpiphdr *)q->ti_prev != (struct tcpiphdr *)tp) {
		register int i;
		q = (struct tcpiphdr *)q->ti_prev;
		/* conversion to int (in i) handles seq wraparound */
		i = q->ti_seq + q->ti_len - ti->ti_seq;
		if (i > 0) {
			if (i >= ti->ti_len) {
				/* Complete duplicate: discard the mbufs. */
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += ti->ti_len;
				m_freem(m);
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			/* Partial overlap: trim the duplicated prefix. */
			m_adj(m, i);
			ti->ti_len -= i;
			ti->ti_seq += i;
		}
		q = (struct tcpiphdr *)(q->ti_next);
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += ti->ti_len;
	/* Stash the mbuf pointer in the (now unused) header overlay. */
	REASS_MBUF(ti) = m;		/* XXX */

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q != (struct tcpiphdr *)tp) {
		register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq;
		if (i <= 0)
			break;
		if (i < q->ti_len) {
			/* Partial overlap: trim the front of q and stop. */
			q->ti_seq += i;
			q->ti_len -= i;
			m_adj(REASS_MBUF(q), i);
			break;
		}
		/*
		 * q is completely covered: advance past it first, then
		 * unlink and free it (q->ti_prev is the covered segment
		 * after the advance).
		 */
		q = (struct tcpiphdr *)q->ti_next;
		m = REASS_MBUF((struct tcpiphdr *)q->ti_prev);
		remque(q->ti_prev);
		m_freem(m);
	}

	/*
	 * Stick new segment in its place.
	 */
	insque(ti, q->ti_prev);

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	ti = tp->seg_next;
	if (ti == (struct tcpiphdr *)tp || ti->ti_seq != tp->rcv_nxt)
		return (0);
	/* In SYN_RECEIVED, hold data until the handshake completes. */
	if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len)
		return (0);
	do {
		tp->rcv_nxt += ti->ti_len;
		flags = ti->ti_flags & TH_FIN;
		remque(ti);
		m = REASS_MBUF(ti);
		ti = (struct tcpiphdr *)ti->ti_next;
		if (so->so_state & SS_CANTRCVMORE)
			m_freem(m);
		else
			sbappend(&so->so_rcv, m);
	} while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt);
	sorwakeup(so);
	return (flags);
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
217 */ 218void 219tcp_input(m, iphlen) 220 register struct mbuf *m; 221 int iphlen; 222{ 223 register struct tcpiphdr *ti; 224 register struct inpcb *inp; 225 caddr_t optp = NULL; 226 int optlen = 0; 227 int len, tlen, off; 228 register struct tcpcb *tp = 0; 229 register int tiflags; 230 struct socket *so = 0; 231 int todrop, acked, ourfinisacked, needoutput = 0; 232 struct in_addr laddr; 233 int dropsocket = 0; 234 int iss = 0; 235 u_long tiwin; 236 struct tcpopt to; /* options in this segment */ 237 struct rmxp_tao *taop; /* pointer to our TAO cache entry */ 238 struct rmxp_tao tao_noncached; /* in case there's no cached entry */ 239#ifdef TCPDEBUG 240 short ostate = 0; 241#endif 242 243 bzero((char *)&to, sizeof(to)); 244 245 tcpstat.tcps_rcvtotal++; 246 /* 247 * Get IP and TCP header together in first mbuf. 248 * Note: IP leaves IP header in first mbuf. 249 */ 250 ti = mtod(m, struct tcpiphdr *); 251 if (iphlen > sizeof (struct ip)) 252 ip_stripoptions(m, (struct mbuf *)0); 253 if (m->m_len < sizeof (struct tcpiphdr)) { 254 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) { 255 tcpstat.tcps_rcvshort++; 256 return; 257 } 258 ti = mtod(m, struct tcpiphdr *); 259 } 260 261 /* 262 * Checksum extended TCP header and data. 263 */ 264 tlen = ((struct ip *)ti)->ip_len; 265 len = sizeof (struct ip) + tlen; 266 ti->ti_next = ti->ti_prev = 0; 267 ti->ti_x1 = 0; 268 ti->ti_len = (u_short)tlen; 269 HTONS(ti->ti_len); 270 ti->ti_sum = in_cksum(m, len); 271 if (ti->ti_sum) { 272 tcpstat.tcps_rcvbadsum++; 273 goto drop; 274 } 275#endif /* TUBA_INCLUDE */ 276 277 /* 278 * Check that TCP offset makes sense, 279 * pull out TCP options and adjust length. 
XXX 280 */ 281 off = ti->ti_off << 2; 282 if (off < sizeof (struct tcphdr) || off > tlen) { 283 tcpstat.tcps_rcvbadoff++; 284 goto drop; 285 } 286 tlen -= off; 287 ti->ti_len = tlen; 288 if (off > sizeof (struct tcphdr)) { 289 if (m->m_len < sizeof(struct ip) + off) { 290 if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) { 291 tcpstat.tcps_rcvshort++; 292 return; 293 } 294 ti = mtod(m, struct tcpiphdr *); 295 } 296 optlen = off - sizeof (struct tcphdr); 297 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr); 298 /* 299 * Do quick retrieval of timestamp options ("options 300 * prediction?"). If timestamp is the only option and it's 301 * formatted as recommended in RFC 1323 appendix A, we 302 * quickly get the values now and not bother calling 303 * tcp_dooptions(), etc. 304 */ 305 if ((optlen == TCPOLEN_TSTAMP_APPA || 306 (optlen > TCPOLEN_TSTAMP_APPA && 307 optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && 308 *(u_long *)optp == htonl(TCPOPT_TSTAMP_HDR) && 309 (ti->ti_flags & TH_SYN) == 0) { 310 to.to_flag |= TOF_TS; 311 to.to_tsval = ntohl(*(u_long *)(optp + 4)); 312 to.to_tsecr = ntohl(*(u_long *)(optp + 8)); 313 optp = NULL; /* we've parsed the options */ 314 } 315 } 316 tiflags = ti->ti_flags; 317 318 /* 319 * Convert TCP protocol specific fields to host format. 320 */ 321 NTOHL(ti->ti_seq); 322 NTOHL(ti->ti_ack); 323 NTOHS(ti->ti_win); 324 NTOHS(ti->ti_urp); 325 326 /* 327 * Locate pcb for segment. 328 */ 329findpcb: 330 inp = tcp_last_inpcb; 331 if (inp->inp_lport != ti->ti_dport || 332 inp->inp_fport != ti->ti_sport || 333 inp->inp_faddr.s_addr != ti->ti_src.s_addr || 334 inp->inp_laddr.s_addr != ti->ti_dst.s_addr) { 335 inp = in_pcblookup(&tcb, ti->ti_src, ti->ti_sport, 336 ti->ti_dst, ti->ti_dport, INPLOOKUP_WILDCARD); 337 if (inp) 338 tcp_last_inpcb = inp; 339 ++tcpstat.tcps_pcbcachemiss; 340 } 341 342 /* 343 * If the state is CLOSED (i.e., TCB does not exist) then 344 * all data in the incoming segment is discarded. 
345 * If the TCB exists but is in CLOSED state, it is embryonic, 346 * but should either do a listen or a connect soon. 347 */ 348 if (inp == 0) 349 goto dropwithreset; 350 tp = intotcpcb(inp); 351 if (tp == 0) 352 goto dropwithreset; 353 if (tp->t_state == TCPS_CLOSED) 354 goto drop; 355 356 /* Unscale the window into a 32-bit value. */ 357 if ((tiflags & TH_SYN) == 0) 358 tiwin = ti->ti_win << tp->snd_scale; 359 else 360 tiwin = ti->ti_win; 361 362 so = inp->inp_socket; 363 if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) { 364#ifdef TCPDEBUG 365 if (so->so_options & SO_DEBUG) { 366 ostate = tp->t_state; 367 tcp_saveti = *ti; 368 } 369#endif 370 if (so->so_options & SO_ACCEPTCONN) { 371 register struct tcpcb *tp0 = tp; 372 so = sonewconn(so, 0); 373 if (so == 0) 374 goto drop; 375 /* 376 * This is ugly, but .... 377 * 378 * Mark socket as temporary until we're 379 * committed to keeping it. The code at 380 * ``drop'' and ``dropwithreset'' check the 381 * flag dropsocket to see if the temporary 382 * socket created here should be discarded. 383 * We mark the socket as discardable until 384 * we're committed to it below in TCPS_LISTEN. 385 */ 386 dropsocket++; 387 inp = (struct inpcb *)so->so_pcb; 388 inp->inp_laddr = ti->ti_dst; 389 inp->inp_lport = ti->ti_dport; 390#if BSD>=43 391 inp->inp_options = ip_srcroute(); 392#endif 393 tp = intotcpcb(inp); 394 tp->t_state = TCPS_LISTEN; 395 tp->t_flags |= tp0->t_flags & (TF_NOPUSH|TF_NOOPT); 396 397 /* Compute proper scaling value from buffer space */ 398 while (tp->request_r_scale < TCP_MAX_WINSHIFT && 399 TCP_MAXWIN << tp->request_r_scale < so->so_rcv.sb_hiwat) 400 tp->request_r_scale++; 401 } 402 } 403 404 /* 405 * Segment received on connection. 406 * Reset idle time and keep-alive timer. 407 */ 408 tp->t_idle = 0; 409 tp->t_timer[TCPT_KEEP] = tcp_keepidle; 410 411 /* 412 * Process options if not in LISTEN state, 413 * else do it below (after getting remote address). 
414 */ 415 if (optp && tp->t_state != TCPS_LISTEN) 416 tcp_dooptions(tp, optp, optlen, ti, 417 &to); 418 419 /* 420 * Header prediction: check for the two common cases 421 * of a uni-directional data xfer. If the packet has 422 * no control flags, is in-sequence, the window didn't 423 * change and we're not retransmitting, it's a 424 * candidate. If the length is zero and the ack moved 425 * forward, we're the sender side of the xfer. Just 426 * free the data acked & wake any higher level process 427 * that was blocked waiting for space. If the length 428 * is non-zero and the ack didn't move, we're the 429 * receiver side. If we're getting packets in-order 430 * (the reassembly queue is empty), add the data to 431 * the socket buffer and note that we need a delayed ack. 432 * Make sure that the hidden state-flags are also off. 433 * Since we check for TCPS_ESTABLISHED above, it can only 434 * be TH_NEEDSYN. 435 */ 436 if (tp->t_state == TCPS_ESTABLISHED && 437 (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 438 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 439 ((to.to_flag & TOF_TS) == 0 || 440 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && 441 /* 442 * Using the CC option is compulsory if once started: 443 * the segment is OK if no T/TCP was negotiated or 444 * if the segment has a CC option equal to CCrecv 445 */ 446 ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) != (TF_REQ_CC|TF_RCVD_CC) || 447 (to.to_flag & TOF_CC) != 0 && to.to_cc == tp->cc_recv) && 448 ti->ti_seq == tp->rcv_nxt && 449 tiwin && tiwin == tp->snd_wnd && 450 tp->snd_nxt == tp->snd_max) { 451 452 /* 453 * If last ACK falls within this segment's sequence numbers, 454 * record the timestamp. 455 * NOTE that the test is modified according to the latest 456 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 
457 */ 458 if ((to.to_flag & TOF_TS) != 0 && 459 SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)) { 460 tp->ts_recent_age = tcp_now; 461 tp->ts_recent = to.to_tsval; 462 } 463 464 if (ti->ti_len == 0) { 465 if (SEQ_GT(ti->ti_ack, tp->snd_una) && 466 SEQ_LEQ(ti->ti_ack, tp->snd_max) && 467 tp->snd_cwnd >= tp->snd_wnd) { 468 /* 469 * this is a pure ack for outstanding data. 470 */ 471 ++tcpstat.tcps_predack; 472 if ((to.to_flag & TOF_TS) != 0) 473 tcp_xmit_timer(tp, 474 tcp_now - to.to_tsecr + 1); 475 else if (tp->t_rtt && 476 SEQ_GT(ti->ti_ack, tp->t_rtseq)) 477 tcp_xmit_timer(tp, tp->t_rtt); 478 acked = ti->ti_ack - tp->snd_una; 479 tcpstat.tcps_rcvackpack++; 480 tcpstat.tcps_rcvackbyte += acked; 481 sbdrop(&so->so_snd, acked); 482 tp->snd_una = ti->ti_ack; 483 m_freem(m); 484 485 /* 486 * If all outstanding data are acked, stop 487 * retransmit timer, otherwise restart timer 488 * using current (possibly backed-off) value. 489 * If process is waiting for space, 490 * wakeup/selwakeup/signal. If data 491 * are ready to send, let tcp_output 492 * decide between more output or persist. 493 */ 494 if (tp->snd_una == tp->snd_max) 495 tp->t_timer[TCPT_REXMT] = 0; 496 else if (tp->t_timer[TCPT_PERSIST] == 0) 497 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; 498 499 if (so->so_snd.sb_flags & SB_NOTIFY) 500 sowwakeup(so); 501 if (so->so_snd.sb_cc) 502 (void) tcp_output(tp); 503 return; 504 } 505 } else if (ti->ti_ack == tp->snd_una && 506 tp->seg_next == (struct tcpiphdr *)tp && 507 ti->ti_len <= sbspace(&so->so_rcv)) { 508 /* 509 * this is a pure, in-sequence data packet 510 * with nothing on the reassembly queue and 511 * we have enough buffer space to take it. 512 */ 513 ++tcpstat.tcps_preddat; 514 tp->rcv_nxt += ti->ti_len; 515 tcpstat.tcps_rcvpack++; 516 tcpstat.tcps_rcvbyte += ti->ti_len; 517 /* 518 * Drop TCP, IP headers and TCP options then add data 519 * to socket buffer. 
520 */ 521 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 522 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 523 sbappend(&so->so_rcv, m); 524 sorwakeup(so); 525 /* 526 * If this is a short packet, then ACK now - with Nagel 527 * congestion avoidance sender won't send more until 528 * he gets an ACK. 529 */ 530 if (ti->ti_flags & TH_PUSH) { 531 tp->t_flags |= TF_ACKNOW; 532 tcp_output(tp); 533 } else { 534 tp->t_flags |= TF_DELACK; 535 } 536 return; 537 } 538 } 539 540 /* 541 * Drop TCP, IP headers and TCP options. 542 */ 543 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 544 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr); 545 546 /* 547 * Calculate amount of space in receive window, 548 * and then do TCP input processing. 549 * Receive window is amount of space in rcv queue, 550 * but not less than advertised window. 551 */ 552 { int win; 553 554 win = sbspace(&so->so_rcv); 555 if (win < 0) 556 win = 0; 557 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 558 } 559 560 switch (tp->t_state) { 561 562 /* 563 * If the state is LISTEN then ignore segment if it contains an RST. 564 * If the segment contains an ACK then it is bad and send a RST. 565 * If it does not contain a SYN then it is not interesting; drop it. 566 * Don't bother responding if the destination was a broadcast. 567 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial 568 * tp->iss, and send a segment: 569 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> 570 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss. 571 * Fill in remote peer address fields if not previously specified. 572 * Enter SYN_RECEIVED state, and process any other fields of this 573 * segment in this state. 
574 */ 575 case TCPS_LISTEN: { 576 struct mbuf *am; 577 register struct sockaddr_in *sin; 578 579 if (tiflags & TH_RST) 580 goto drop; 581 if (tiflags & TH_ACK) 582 goto dropwithreset; 583 if ((tiflags & TH_SYN) == 0) 584 goto drop; 585 /* 586 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN 587 * in_broadcast() should never return true on a received 588 * packet with M_BCAST not set. 589 */ 590 if (m->m_flags & (M_BCAST|M_MCAST) || 591 IN_MULTICAST(ntohl(ti->ti_dst.s_addr))) 592 goto drop; 593 am = m_get(M_DONTWAIT, MT_SONAME); /* XXX */ 594 if (am == NULL) 595 goto drop; 596 am->m_len = sizeof (struct sockaddr_in); 597 sin = mtod(am, struct sockaddr_in *); 598 sin->sin_family = AF_INET; 599 sin->sin_len = sizeof(*sin); 600 sin->sin_addr = ti->ti_src; 601 sin->sin_port = ti->ti_sport; 602 bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero)); 603 laddr = inp->inp_laddr; 604 if (inp->inp_laddr.s_addr == INADDR_ANY) 605 inp->inp_laddr = ti->ti_dst; 606 if (in_pcbconnect(inp, am)) { 607 inp->inp_laddr = laddr; 608 (void) m_free(am); 609 goto drop; 610 } 611 (void) m_free(am); 612 tp->t_template = tcp_template(tp); 613 if (tp->t_template == 0) { 614 tp = tcp_drop(tp, ENOBUFS); 615 dropsocket = 0; /* socket is already gone */ 616 goto drop; 617 } 618 if ((taop = tcp_gettaocache(inp)) == NULL) { 619 taop = &tao_noncached; 620 bzero(taop, sizeof(*taop)); 621 } 622 if (optp) 623 tcp_dooptions(tp, optp, optlen, ti, 624 &to); 625 if (iss) 626 tp->iss = iss; 627 else 628 tp->iss = tcp_iss; 629 tcp_iss += TCP_ISSINCR/2; 630 tp->irs = ti->ti_seq; 631 tcp_sendseqinit(tp); 632 tcp_rcvseqinit(tp); 633 /* 634 * Initialization of the tcpcb for transaction; 635 * set SND.WND = SEG.WND, 636 * initialize CCsend and CCrecv. 637 */ 638 tp->snd_wnd = tiwin; /* initial send-window */ 639 tp->cc_send = CC_INC(tcp_ccgen); 640 tp->cc_recv = to.to_cc; 641 /* 642 * Perform TAO test on incoming CC (SEG.CC) option, if any. 
643 * - compare SEG.CC against cached CC from the same host, 644 * if any. 645 * - if SEG.CC > chached value, SYN must be new and is accepted 646 * immediately: save new CC in the cache, mark the socket 647 * connected, enter ESTABLISHED state, turn on flag to 648 * send a SYN in the next segment. 649 * A virtual advertised window is set in rcv_adv to 650 * initialize SWS prevention. Then enter normal segment 651 * processing: drop SYN, process data and FIN. 652 * - otherwise do a normal 3-way handshake. 653 */ 654 if ((to.to_flag & TOF_CC) != 0) { 655 if (taop->tao_cc != 0 && CC_GT(to.to_cc, taop->tao_cc)) { 656 taop->tao_cc = to.to_cc; 657 tp->t_state = TCPS_ESTABLISHED; 658 659 /* 660 * If there is a FIN, or if there is data and the 661 * connection is local, then delay SYN,ACK(SYN) in 662 * the hope of piggy-backing it on a response 663 * segment. Otherwise must send ACK now in case 664 * the other side is slow starting. 665 */ 666 if ((tiflags & TH_FIN) || (ti->ti_len != 0 && 667 in_localaddr(inp->inp_faddr))) 668 tp->t_flags |= (TF_DELACK | TF_NEEDSYN); 669 else 670 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 671 tp->rcv_adv += tp->rcv_wnd; 672 tcpstat.tcps_connects++; 673 soisconnected(so); 674 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; 675 dropsocket = 0; /* committed to socket */ 676 tcpstat.tcps_accepts++; 677 goto trimthenstep6; 678 } 679 /* else do standard 3-way handshake */ 680 } else { 681 /* 682 * No CC option, but maybe CC.NEW: 683 * invalidate cached value. 684 */ 685 taop->tao_cc = 0; 686 } 687 /* 688 * TAO test failed or there was no CC option, 689 * do a standard 3-way handshake. 690 */ 691 tp->t_flags |= TF_ACKNOW; 692 tp->t_state = TCPS_SYN_RECEIVED; 693 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; 694 dropsocket = 0; /* committed to socket */ 695 tcpstat.tcps_accepts++; 696 goto trimthenstep6; 697 } 698 699 /* 700 * If the state is SYN_SENT: 701 * if seg contains an ACK, but not for our SYN, drop the input. 
702 * if seg contains a RST, then drop the connection. 703 * if seg does not contain SYN, then drop it. 704 * Otherwise this is an acceptable SYN segment 705 * initialize tp->rcv_nxt and tp->irs 706 * if seg contains ack then advance tp->snd_una 707 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 708 * arrange for segment to be acked (eventually) 709 * continue processing rest of data/controls, beginning with URG 710 */ 711 case TCPS_SYN_SENT: 712 if ((taop = tcp_gettaocache(inp)) == NULL) { 713 taop = &tao_noncached; 714 bzero(taop, sizeof(*taop)); 715 } 716 717 if ((tiflags & TH_ACK) && 718 (SEQ_LEQ(ti->ti_ack, tp->iss) || 719 SEQ_GT(ti->ti_ack, tp->snd_max))) { 720 /* 721 * If we have a cached CCsent for the remote host, 722 * hence we haven't just crashed and restarted, 723 * do not send a RST. This may be a retransmission 724 * from the other side after our earlier ACK was lost. 725 * Our new SYN, when it arrives, will serve as the 726 * needed ACK. 727 */ 728 if (taop->tao_ccsent != 0) 729 goto drop; 730 else 731 goto dropwithreset; 732 } 733 if (tiflags & TH_RST) { 734 if (tiflags & TH_ACK) 735 tp = tcp_drop(tp, ECONNREFUSED); 736 goto drop; 737 } 738 if ((tiflags & TH_SYN) == 0) 739 goto drop; 740 tp->snd_wnd = ti->ti_win; /* initial send window */ 741 tp->cc_recv = to.to_cc; /* foreign CC */ 742 743 tp->irs = ti->ti_seq; 744 tcp_rcvseqinit(tp); 745 if (tiflags & TH_ACK && SEQ_GT(ti->ti_ack, tp->iss)) { 746 tcpstat.tcps_connects++; 747 soisconnected(so); 748 /* Do window scaling on this connection? */ 749 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 750 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 751 tp->snd_scale = tp->requested_s_scale; 752 tp->rcv_scale = tp->request_r_scale; 753 } 754 /* 755 * Our SYN was acked. If segment contains CC.ECHO 756 * option, check it to make sure this segment really 757 * matches our SYN. If not, just drop it as old 758 * duplicate, but send an RST if we're still playing 759 * by the old rules. 
760 */ 761 if ((to.to_flag & TOF_CCECHO) && 762 tp->cc_send != to.to_ccecho) { 763 if (taop->tao_ccsent != 0) 764 goto drop; 765 else 766 goto dropwithreset; 767 } 768 /* Segment is acceptable, update cache if undefined. */ 769 if (taop->tao_ccsent == 0) 770 taop->tao_ccsent = to.to_ccecho; 771 772 tp->rcv_adv += tp->rcv_wnd; 773 tp->snd_una++; /* SYN is acked */ 774 /* 775 * If there's data, delay ACK; if there's also a FIN 776 * ACKNOW will be turned on later. 777 */ 778 if (ti->ti_len != 0) 779 tp->t_flags |= TF_DELACK; 780 else 781 tp->t_flags |= TF_ACKNOW; 782 /* 783 * Received <SYN,ACK> in SYN_SENT[*] state. 784 * Transitions: 785 * SYN_SENT --> ESTABLISHED 786 * SYN_SENT* --> FIN_WAIT_1 787 */ 788 if (tp->t_flags & TF_NEEDFIN) { 789 tp->t_state = TCPS_FIN_WAIT_1; 790 tp->t_flags &= ~TF_NEEDFIN; 791 tiflags &= ~TH_SYN; 792 } else 793 tp->t_state = TCPS_ESTABLISHED; 794 795 } else { 796 /* 797 * Received initial SYN in SYN-SENT[*] state => simul- 798 * taneous open. If segment contains CC option and there is 799 * a cached CC, apply TAO test; if it succeeds, connection is 800 * half-synchronized. Otherwise, do 3-way handshake: 801 * SYN-SENT -> SYN-RECEIVED 802 * SYN-SENT* -> SYN-RECEIVED* 803 * If there was no CC option, clear cached CC value. 
804 */ 805 tp->t_flags |= TF_ACKNOW; 806 tp->t_timer[TCPT_REXMT] = 0; 807 if (to.to_flag & TOF_CC) { 808 if (taop->tao_cc != 0 && 809 CC_GT(to.to_cc, taop->tao_cc)) { 810 /* 811 * update cache and make transition: 812 * SYN-SENT -> ESTABLISHED* 813 * SYN-SENT* -> FIN-WAIT-1* 814 */ 815 taop->tao_cc = to.to_cc; 816 if (tp->t_flags & TF_NEEDFIN) { 817 tp->t_state = TCPS_FIN_WAIT_1; 818 tp->t_flags &= ~TF_NEEDFIN; 819 } else 820 tp->t_state = TCPS_ESTABLISHED; 821 tp->t_flags |= TF_NEEDSYN; 822 } else 823 tp->t_state = TCPS_SYN_RECEIVED; 824 } else { 825 /* CC.NEW or no option => invalidate cache */ 826 taop->tao_cc = 0; 827 tp->t_state = TCPS_SYN_RECEIVED; 828 } 829 } 830 831trimthenstep6: 832 /* 833 * Advance ti->ti_seq to correspond to first data byte. 834 * If data, trim to stay within window, 835 * dropping FIN if necessary. 836 */ 837 ti->ti_seq++; 838 if (ti->ti_len > tp->rcv_wnd) { 839 todrop = ti->ti_len - tp->rcv_wnd; 840 m_adj(m, -todrop); 841 ti->ti_len = tp->rcv_wnd; 842 tiflags &= ~TH_FIN; 843 tcpstat.tcps_rcvpackafterwin++; 844 tcpstat.tcps_rcvbyteafterwin += todrop; 845 } 846 tp->snd_wl1 = ti->ti_seq - 1; 847 tp->rcv_up = ti->ti_seq; 848 /* 849 * Client side of transaction: already sent SYN and data. 850 * If the remote host used T/TCP to validate the SYN, 851 * our data will be ACK'd; if so, enter normal data segment 852 * processing in the middle of step 5, ack processing. 853 * Otherwise, goto step 6. 854 */ 855 if (tiflags & TH_ACK) 856 goto process_ACK; 857 goto step6; 858 /* 859 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 860 * if segment contains a SYN and CC [not CC.NEW] option: 861 * if state == TIME_WAIT and connection duration > MSL, 862 * drop packet and send RST; 863 * 864 * if SEG.CC > CCrecv then is new SYN, and can implicitly 865 * ack the FIN (and data) in retransmission queue. 866 * Complete close and delete TCPCB. 
Then reprocess 867 * segment, hoping to find new TCPCB in LISTEN state; 868 * 869 * else must be old SYN; drop it. 870 * else do normal processing. 871 */ 872 case TCPS_LAST_ACK: 873 case TCPS_CLOSING: 874 case TCPS_TIME_WAIT: 875 if ((tiflags & TH_SYN) && 876 (to.to_flag & TOF_CC) && tp->cc_recv != 0) { 877 if (tp->t_state == TCPS_TIME_WAIT && 878 tp->t_duration > TCPTV_MSL) 879 goto dropwithreset; 880 if (CC_GT(to.to_cc, tp->cc_recv)) { 881 tp = tcp_close(tp); 882 goto findpcb; 883 } 884 else 885 goto drop; 886 } 887 break; /* continue normal processing */ 888 } 889 890 /* 891 * States other than LISTEN or SYN_SENT. 892 * First check timestamp, if present. 893 * Then check the connection count, if present. 894 * Then check that at least some bytes of segment are within 895 * receive window. If segment begins before rcv_nxt, 896 * drop leading data (and SYN); if nothing left, just ack. 897 * 898 * RFC 1323 PAWS: If we have a timestamp reply on this segment 899 * and it's less than ts_recent, drop it. 900 */ 901 if ((to.to_flag & TOF_TS) != 0 && (tiflags & TH_RST) == 0 && 902 tp->ts_recent && TSTMP_LT(to.to_tsval, tp->ts_recent)) { 903 904 /* Check to see if ts_recent is over 24 days old. */ 905 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) { 906 /* 907 * Invalidate ts_recent. If this segment updates 908 * ts_recent, the age will be reset later and ts_recent 909 * will get a valid value. If it does not, setting 910 * ts_recent to zero will at least satisfy the 911 * requirement that zero be placed in the timestamp 912 * echo reply when ts_recent isn't valid. The 913 * age isn't reset until we get a valid ts_recent 914 * because we don't want out-of-order segments to be 915 * dropped when ts_recent is old. 
916 */ 917 tp->ts_recent = 0; 918 } else { 919 tcpstat.tcps_rcvduppack++; 920 tcpstat.tcps_rcvdupbyte += ti->ti_len; 921 tcpstat.tcps_pawsdrop++; 922 goto dropafterack; 923 } 924 } 925 926 /* 927 * T/TCP mechanism 928 * If T/TCP was negotiated and the segment doesn't have CC, 929 * or if it's CC is wrong then drop the segment. 930 * RST segments do not have to comply with this. 931 */ 932 if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) && 933 ((to.to_flag & TOF_CC) == 0 || tp->cc_recv != to.to_cc) && 934 (tiflags & TH_RST) == 0) 935 goto dropafterack; 936 937 todrop = tp->rcv_nxt - ti->ti_seq; 938 if (todrop > 0) { 939 if (tiflags & TH_SYN) { 940 tiflags &= ~TH_SYN; 941 ti->ti_seq++; 942 if (ti->ti_urp > 1) 943 ti->ti_urp--; 944 else 945 tiflags &= ~TH_URG; 946 todrop--; 947 } 948 /* 949 * Following if statement from Stevens, vol. 2, p. 960. 950 */ 951 if (todrop > ti->ti_len 952 || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) { 953 /* 954 * Any valid FIN must be to the left of the window. 955 * At this point the FIN must be a duplicate or out 956 * of sequence; drop it. 957 */ 958 tiflags &= ~TH_FIN; 959 960 /* 961 * Send an ACK to resynchronize and drop any data. 962 * But keep on processing for RST or ACK. 963 */ 964 tp->t_flags |= TF_ACKNOW; 965 todrop = ti->ti_len; 966 tcpstat.tcps_rcvduppack++; 967 tcpstat.tcps_rcvdupbyte += todrop; 968 } else { 969 tcpstat.tcps_rcvpartduppack++; 970 tcpstat.tcps_rcvpartdupbyte += todrop; 971 } 972 m_adj(m, todrop); 973 ti->ti_seq += todrop; 974 ti->ti_len -= todrop; 975 if (ti->ti_urp > todrop) 976 ti->ti_urp -= todrop; 977 else { 978 tiflags &= ~TH_URG; 979 ti->ti_urp = 0; 980 } 981 } 982 983 /* 984 * If new data are received on a connection after the 985 * user processes are gone, then RST the other end. 
986 */ 987 if ((so->so_state & SS_NOFDREF) && 988 tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) { 989 tp = tcp_close(tp); 990 tcpstat.tcps_rcvafterclose++; 991 goto dropwithreset; 992 } 993 994 /* 995 * If segment ends after window, drop trailing data 996 * (and PUSH and FIN); if nothing left, just ACK. 997 */ 998 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd); 999 if (todrop > 0) { 1000 tcpstat.tcps_rcvpackafterwin++; 1001 if (todrop >= ti->ti_len) { 1002 tcpstat.tcps_rcvbyteafterwin += ti->ti_len; 1003 /* 1004 * If a new connection request is received 1005 * while in TIME_WAIT, drop the old connection 1006 * and start over if the sequence numbers 1007 * are above the previous ones. 1008 */ 1009 if (tiflags & TH_SYN && 1010 tp->t_state == TCPS_TIME_WAIT && 1011 SEQ_GT(ti->ti_seq, tp->rcv_nxt)) { 1012 iss = tp->rcv_nxt + TCP_ISSINCR; 1013 tp = tcp_close(tp); 1014 goto findpcb; 1015 } 1016 /* 1017 * If window is closed can only take segments at 1018 * window edge, and have to drop data and PUSH from 1019 * incoming segments. Continue processing, but 1020 * remember to ack. Otherwise, drop segment 1021 * and ack. 1022 */ 1023 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) { 1024 tp->t_flags |= TF_ACKNOW; 1025 tcpstat.tcps_rcvwinprobe++; 1026 } else 1027 goto dropafterack; 1028 } else 1029 tcpstat.tcps_rcvbyteafterwin += todrop; 1030 m_adj(m, -todrop); 1031 ti->ti_len -= todrop; 1032 tiflags &= ~(TH_PUSH|TH_FIN); 1033 } 1034 1035 /* 1036 * If last ACK falls within this segment's sequence numbers, 1037 * record its timestamp. 1038 * NOTE that the test is modified according to the latest 1039 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1040 */ 1041 if ((to.to_flag & TOF_TS) != 0 && 1042 SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)) { 1043 tp->ts_recent_age = tcp_now; 1044 tp->ts_recent = to.to_tsval; 1045 } 1046 1047 /* 1048 * If the RST bit is set examine the state: 1049 * SYN_RECEIVED STATE: 1050 * If passive open, return to LISTEN state. 
1051 * If active open, inform user that connection was refused. 1052 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES: 1053 * Inform user that connection was reset, and close tcb. 1054 * CLOSING, LAST_ACK, TIME_WAIT STATES 1055 * Close the tcb. 1056 */ 1057 if (tiflags&TH_RST) switch (tp->t_state) { 1058 1059 case TCPS_SYN_RECEIVED: 1060 so->so_error = ECONNREFUSED; 1061 goto close; 1062 1063 case TCPS_ESTABLISHED: 1064 case TCPS_FIN_WAIT_1: 1065 case TCPS_FIN_WAIT_2: 1066 case TCPS_CLOSE_WAIT: 1067 so->so_error = ECONNRESET; 1068 close: 1069 tp->t_state = TCPS_CLOSED; 1070 tcpstat.tcps_drops++; 1071 tp = tcp_close(tp); 1072 goto drop; 1073 1074 case TCPS_CLOSING: 1075 case TCPS_LAST_ACK: 1076 case TCPS_TIME_WAIT: 1077 tp = tcp_close(tp); 1078 goto drop; 1079 } 1080 1081 /* 1082 * If a SYN is in the window, then this is an 1083 * error and we send an RST and drop the connection. 1084 */ 1085 if (tiflags & TH_SYN) { 1086 tp = tcp_drop(tp, ECONNRESET); 1087 goto dropwithreset; 1088 } 1089 1090 /* 1091 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1092 * flag is on (half-synchronized state), then queue data for 1093 * later processing; else drop segment and return. 1094 */ 1095 if ((tiflags & TH_ACK) == 0) { 1096 if (tp->t_state == TCPS_SYN_RECEIVED || 1097 (tp->t_flags & TF_NEEDSYN)) 1098 goto step6; 1099 else 1100 goto drop; 1101 } 1102 1103 /* 1104 * Ack processing. 1105 */ 1106 switch (tp->t_state) { 1107 1108 /* 1109 * In SYN_RECEIVED state if the ack ACKs our SYN then enter 1110 * ESTABLISHED state and continue processing, otherwise 1111 * send an RST. 1112 */ 1113 case TCPS_SYN_RECEIVED: 1114 if (SEQ_GT(tp->snd_una, ti->ti_ack) || 1115 SEQ_GT(ti->ti_ack, tp->snd_max)) 1116 goto dropwithreset; 1117 1118 tcpstat.tcps_connects++; 1119 soisconnected(so); 1120 /* Do window scaling? 
*/ 1121 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1122 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1123 tp->snd_scale = tp->requested_s_scale; 1124 tp->rcv_scale = tp->request_r_scale; 1125 } 1126 /* 1127 * Upon successful completion of 3-way handshake, 1128 * update cache.CC if it was undefined, pass any queued 1129 * data to the user, and advance state appropriately. 1130 */ 1131 if ((taop = tcp_gettaocache(inp)) != NULL && 1132 taop->tao_cc == 0) 1133 taop->tao_cc = tp->cc_recv; 1134 1135 /* 1136 * Make transitions: 1137 * SYN-RECEIVED -> ESTABLISHED 1138 * SYN-RECEIVED* -> FIN-WAIT-1 1139 */ 1140 if (tp->t_flags & TF_NEEDFIN) { 1141 tp->t_state = TCPS_FIN_WAIT_1; 1142 tp->t_flags &= ~TF_NEEDFIN; 1143 } else 1144 tp->t_state = TCPS_ESTABLISHED; 1145 /* 1146 * If segment contains data or ACK, will call tcp_reass() 1147 * later; if not, do so now to pass queued data to user. 1148 */ 1149 if (ti->ti_len == 0 && (tiflags & TH_FIN) == 0) 1150 (void) tcp_reass(tp, (struct tcpiphdr *)0, 1151 (struct mbuf *)0); 1152 tp->snd_wl1 = ti->ti_seq - 1; 1153 /* fall into ... */ 1154 1155 /* 1156 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1157 * ACKs. If the ack is in the range 1158 * tp->snd_una < ti->ti_ack <= tp->snd_max 1159 * then advance tp->snd_una to ti->ti_ack and drop 1160 * data from the retransmission queue. If this ACK reflects 1161 * more up to date window information we update our window information. 
1162 */ 1163 case TCPS_ESTABLISHED: 1164 case TCPS_FIN_WAIT_1: 1165 case TCPS_FIN_WAIT_2: 1166 case TCPS_CLOSE_WAIT: 1167 case TCPS_CLOSING: 1168 case TCPS_LAST_ACK: 1169 case TCPS_TIME_WAIT: 1170 1171 if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) { 1172 if (ti->ti_len == 0 && tiwin == tp->snd_wnd) { 1173 tcpstat.tcps_rcvdupack++; 1174 /* 1175 * If we have outstanding data (other than 1176 * a window probe), this is a completely 1177 * duplicate ack (ie, window info didn't 1178 * change), the ack is the biggest we've 1179 * seen and we've seen exactly our rexmt 1180 * threshhold of them, assume a packet 1181 * has been dropped and retransmit it. 1182 * Kludge snd_nxt & the congestion 1183 * window so we send only this one 1184 * packet. 1185 * 1186 * We know we're losing at the current 1187 * window size so do congestion avoidance 1188 * (set ssthresh to half the current window 1189 * and pull our congestion window back to 1190 * the new ssthresh). 1191 * 1192 * Dup acks mean that packets have left the 1193 * network (they're now cached at the receiver) 1194 * so bump cwnd by the amount in the receiver 1195 * to keep a constant cwnd packets in the 1196 * network. 
1197 */ 1198 if (tp->t_timer[TCPT_REXMT] == 0 || 1199 ti->ti_ack != tp->snd_una) 1200 tp->t_dupacks = 0; 1201 else if (++tp->t_dupacks == tcprexmtthresh) { 1202 tcp_seq onxt = tp->snd_nxt; 1203 u_int win = 1204 min(tp->snd_wnd, tp->snd_cwnd) / 2 / 1205 tp->t_maxseg; 1206 1207 if (win < 2) 1208 win = 2; 1209 tp->snd_ssthresh = win * tp->t_maxseg; 1210 tp->t_timer[TCPT_REXMT] = 0; 1211 tp->t_rtt = 0; 1212 tp->snd_nxt = ti->ti_ack; 1213 tp->snd_cwnd = tp->t_maxseg; 1214 (void) tcp_output(tp); 1215 tp->snd_cwnd = tp->snd_ssthresh + 1216 tp->t_maxseg * tp->t_dupacks; 1217 if (SEQ_GT(onxt, tp->snd_nxt)) 1218 tp->snd_nxt = onxt; 1219 goto drop; 1220 } else if (tp->t_dupacks > tcprexmtthresh) { 1221 tp->snd_cwnd += tp->t_maxseg; 1222 (void) tcp_output(tp); 1223 goto drop; 1224 } 1225 } else 1226 tp->t_dupacks = 0; 1227 break; 1228 } 1229 /* 1230 * If the congestion window was inflated to account 1231 * for the other side's cached packets, retract it. 1232 */ 1233 if (tp->t_dupacks > tcprexmtthresh && 1234 tp->snd_cwnd > tp->snd_ssthresh) 1235 tp->snd_cwnd = tp->snd_ssthresh; 1236 tp->t_dupacks = 0; 1237 if (SEQ_GT(ti->ti_ack, tp->snd_max)) { 1238 tcpstat.tcps_rcvacktoomuch++; 1239 goto dropafterack; 1240 } 1241 /* 1242 * If we reach this point, ACK is not a duplicate, 1243 * i.e., it ACKs something we sent. 1244 */ 1245 if (tp->t_flags & TF_NEEDSYN) { 1246 /* 1247 * T/TCP: Connection was half-synchronized, and our 1248 * SYN has been ACK'd (so connection is now fully 1249 * synchronized). Go to non-starred state and 1250 * increment snd_una for ACK of SYN. 1251 */ 1252 tp->t_flags &= ~TF_NEEDSYN; 1253 tp->snd_una++; 1254 } 1255 1256process_ACK: 1257 acked = ti->ti_ack - tp->snd_una; 1258 tcpstat.tcps_rcvackpack++; 1259 tcpstat.tcps_rcvackbyte += acked; 1260 1261 /* 1262 * If we have a timestamp reply, update smoothed 1263 * round trip time. 
If no timestamp is present but 1264 * transmit timer is running and timed sequence 1265 * number was acked, update smoothed round trip time. 1266 * Since we now have an rtt measurement, cancel the 1267 * timer backoff (cf., Phil Karn's retransmit alg.). 1268 * Recompute the initial retransmit timer. 1269 */ 1270 if (to.to_flag & TOF_TS) 1271 tcp_xmit_timer(tp, tcp_now - to.to_tsecr + 1); 1272 else if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq)) 1273 tcp_xmit_timer(tp,tp->t_rtt); 1274 1275 /* 1276 * If all outstanding data is acked, stop retransmit 1277 * timer and remember to restart (more output or persist). 1278 * If there is more data to be acked, restart retransmit 1279 * timer, using current (possibly backed-off) value. 1280 */ 1281 if (ti->ti_ack == tp->snd_max) { 1282 tp->t_timer[TCPT_REXMT] = 0; 1283 needoutput = 1; 1284 } else if (tp->t_timer[TCPT_PERSIST] == 0) 1285 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; 1286 1287 /* 1288 * If no data (only SYN) was ACK'd, 1289 * skip rest of ACK processing. 1290 */ 1291 if (acked == 0) 1292 goto step6; 1293 1294 /* 1295 * When new data is acked, open the congestion window. 1296 * If the window gives us less than ssthresh packets 1297 * in flight, open exponentially (maxseg per packet). 1298 * Otherwise open linearly: maxseg per window 1299 * (maxseg^2 / cwnd per packet). 
1300 */ 1301 { 1302 register u_int cw = tp->snd_cwnd; 1303 register u_int incr = tp->t_maxseg; 1304 1305 if (cw > tp->snd_ssthresh) 1306 incr = incr * incr / cw; 1307 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale); 1308 } 1309 if (acked > so->so_snd.sb_cc) { 1310 tp->snd_wnd -= so->so_snd.sb_cc; 1311 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc); 1312 ourfinisacked = 1; 1313 } else { 1314 sbdrop(&so->so_snd, acked); 1315 tp->snd_wnd -= acked; 1316 ourfinisacked = 0; 1317 } 1318 if (so->so_snd.sb_flags & SB_NOTIFY) 1319 sowwakeup(so); 1320 tp->snd_una = ti->ti_ack; 1321 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 1322 tp->snd_nxt = tp->snd_una; 1323 1324 switch (tp->t_state) { 1325 1326 /* 1327 * In FIN_WAIT_1 STATE in addition to the processing 1328 * for the ESTABLISHED state if our FIN is now acknowledged 1329 * then enter FIN_WAIT_2. 1330 */ 1331 case TCPS_FIN_WAIT_1: 1332 if (ourfinisacked) { 1333 /* 1334 * If we can't receive any more 1335 * data, then closing user can proceed. 1336 * Starting the timer is contrary to the 1337 * specification, but if we don't get a FIN 1338 * we'll hang forever. 1339 */ 1340 if (so->so_state & SS_CANTRCVMORE) { 1341 soisdisconnected(so); 1342 tp->t_timer[TCPT_2MSL] = tcp_maxidle; 1343 } 1344 tp->t_state = TCPS_FIN_WAIT_2; 1345 } 1346 break; 1347 1348 /* 1349 * In CLOSING STATE in addition to the processing for 1350 * the ESTABLISHED state if the ACK acknowledges our FIN 1351 * then enter the TIME-WAIT state, otherwise ignore 1352 * the segment. 
1353 */ 1354 case TCPS_CLOSING: 1355 if (ourfinisacked) { 1356 tp->t_state = TCPS_TIME_WAIT; 1357 tcp_canceltimers(tp); 1358 /* Shorten TIME_WAIT [RFC-1644, p.28] */ 1359 if (tp->cc_recv != 0 && 1360 tp->t_duration < TCPTV_MSL) 1361 tp->t_timer[TCPT_2MSL] = 1362 tp->t_rxtcur * TCPTV_TWTRUNC; 1363 else 1364 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1365 soisdisconnected(so); 1366 } 1367 break; 1368 1369 /* 1370 * In LAST_ACK, we may still be waiting for data to drain 1371 * and/or to be acked, as well as for the ack of our FIN. 1372 * If our FIN is now acknowledged, delete the TCB, 1373 * enter the closed state and return. 1374 */ 1375 case TCPS_LAST_ACK: 1376 if (ourfinisacked) { 1377 tp = tcp_close(tp); 1378 goto drop; 1379 } 1380 break; 1381 1382 /* 1383 * In TIME_WAIT state the only thing that should arrive 1384 * is a retransmission of the remote FIN. Acknowledge 1385 * it and restart the finack timer. 1386 */ 1387 case TCPS_TIME_WAIT: 1388 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1389 goto dropafterack; 1390 } 1391 } 1392 1393step6: 1394 /* 1395 * Update window information. 1396 * Don't look at window if no ACK: TAC's send garbage on first SYN. 1397 */ 1398 if ((tiflags & TH_ACK) && 1399 (SEQ_LT(tp->snd_wl1, ti->ti_seq) || 1400 (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) || 1401 (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) { 1402 /* keep track of pure window updates */ 1403 if (ti->ti_len == 0 && 1404 tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd) 1405 tcpstat.tcps_rcvwinupd++; 1406 tp->snd_wnd = tiwin; 1407 tp->snd_wl1 = ti->ti_seq; 1408 tp->snd_wl2 = ti->ti_ack; 1409 if (tp->snd_wnd > tp->max_sndwnd) 1410 tp->max_sndwnd = tp->snd_wnd; 1411 needoutput = 1; 1412 } 1413 1414 /* 1415 * Process segments with URG. 1416 */ 1417 if ((tiflags & TH_URG) && ti->ti_urp && 1418 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 1419 /* 1420 * This is a kludge, but if we receive and accept 1421 * random urgent pointers, we'll crash in 1422 * soreceive. 
It's hard to imagine someone 1423 * actually wanting to send this much urgent data. 1424 */ 1425 if (ti->ti_urp + so->so_rcv.sb_cc > sb_max) { 1426 ti->ti_urp = 0; /* XXX */ 1427 tiflags &= ~TH_URG; /* XXX */ 1428 goto dodata; /* XXX */ 1429 } 1430 /* 1431 * If this segment advances the known urgent pointer, 1432 * then mark the data stream. This should not happen 1433 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 1434 * a FIN has been received from the remote side. 1435 * In these states we ignore the URG. 1436 * 1437 * According to RFC961 (Assigned Protocols), 1438 * the urgent pointer points to the last octet 1439 * of urgent data. We continue, however, 1440 * to consider it to indicate the first octet 1441 * of data past the urgent section as the original 1442 * spec states (in one of two places). 1443 */ 1444 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) { 1445 tp->rcv_up = ti->ti_seq + ti->ti_urp; 1446 so->so_oobmark = so->so_rcv.sb_cc + 1447 (tp->rcv_up - tp->rcv_nxt) - 1; 1448 if (so->so_oobmark == 0) 1449 so->so_state |= SS_RCVATMARK; 1450 sohasoutofband(so); 1451 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 1452 } 1453 /* 1454 * Remove out of band data so doesn't get presented to user. 1455 * This can happen independent of advancing the URG pointer, 1456 * but if two URG's are pending at once, some out-of-band 1457 * data may creep in... ick. 1458 */ 1459 if (ti->ti_urp <= (u_long)ti->ti_len 1460#ifdef SO_OOBINLINE 1461 && (so->so_options & SO_OOBINLINE) == 0 1462#endif 1463 ) 1464 tcp_pulloutofband(so, ti, m); 1465 } else 1466 /* 1467 * If no out of band data is expected, 1468 * pull receive urgent pointer along 1469 * with the receive window. 1470 */ 1471 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 1472 tp->rcv_up = tp->rcv_nxt; 1473dodata: /* XXX */ 1474 1475 /* 1476 * Process the segment text, merging it into the TCP sequencing queue, 1477 * and arranging for acknowledgment of receipt if necessary. 
1478 * This process logically involves adjusting tp->rcv_wnd as data 1479 * is presented to the user (this happens in tcp_usrreq.c, 1480 * case PRU_RCVD). If a FIN has already been received on this 1481 * connection then we just ignore the text. 1482 */ 1483 if ((ti->ti_len || (tiflags&TH_FIN)) && 1484 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 1485 TCP_REASS(tp, ti, m, so, tiflags); 1486 /* 1487 * Note the amount of data that peer has sent into 1488 * our window, in order to estimate the sender's 1489 * buffer size. 1490 */ 1491 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 1492 } else { 1493 m_freem(m); 1494 tiflags &= ~TH_FIN; 1495 } 1496 1497 /* 1498 * If FIN is received ACK the FIN and let the user know 1499 * that the connection is closing. 1500 */ 1501 if (tiflags & TH_FIN) { 1502 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 1503 socantrcvmore(so); 1504 /* 1505 * If connection is half-synchronized 1506 * (ie SEND_SYN flag on) then delay ACK, 1507 * so it may be piggybacked when SYN is sent. 1508 * Otherwise, since we received a FIN then no 1509 * more input can be expected, send ACK now. 1510 */ 1511 if (tp->t_flags & TF_NEEDSYN) 1512 tp->t_flags |= TF_DELACK; 1513 else 1514 tp->t_flags |= TF_ACKNOW; 1515 tp->rcv_nxt++; 1516 } 1517 switch (tp->t_state) { 1518 1519 /* 1520 * In SYN_RECEIVED and ESTABLISHED STATES 1521 * enter the CLOSE_WAIT state. 1522 */ 1523 case TCPS_SYN_RECEIVED: 1524 case TCPS_ESTABLISHED: 1525 tp->t_state = TCPS_CLOSE_WAIT; 1526 break; 1527 1528 /* 1529 * If still in FIN_WAIT_1 STATE FIN has not been acked so 1530 * enter the CLOSING state. 1531 */ 1532 case TCPS_FIN_WAIT_1: 1533 tp->t_state = TCPS_CLOSING; 1534 break; 1535 1536 /* 1537 * In FIN_WAIT_2 state enter the TIME_WAIT state, 1538 * starting the time-wait timer, turning off the other 1539 * standard timers. 
1540 */ 1541 case TCPS_FIN_WAIT_2: 1542 tp->t_state = TCPS_TIME_WAIT; 1543 tcp_canceltimers(tp); 1544 /* Shorten TIME_WAIT [RFC-1644, p.28] */ 1545 if (tp->cc_recv != 0 && 1546 tp->t_duration < TCPTV_MSL) { 1547 tp->t_timer[TCPT_2MSL] = 1548 tp->t_rxtcur * TCPTV_TWTRUNC; 1549 /* For transaction client, force ACK now. */ 1550 tp->t_flags |= TF_ACKNOW; 1551 } 1552 else 1553 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1554 soisdisconnected(so); 1555 break; 1556 1557 /* 1558 * In TIME_WAIT state restart the 2 MSL time_wait timer. 1559 */ 1560 case TCPS_TIME_WAIT: 1561 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; 1562 break; 1563 } 1564 } 1565#ifdef TCPDEBUG 1566 if (so->so_options & SO_DEBUG) 1567 tcp_trace(TA_INPUT, ostate, tp, &tcp_saveti, 0); 1568#endif 1569 1570 /* 1571 * If this is a short packet, then ACK now - with Nagel 1572 * congestion avoidance sender won't send more until 1573 * he gets an ACK. 1574 */ 1575 if (ti->ti_flags & TH_PUSH) 1576 tp->t_flags |= TF_ACKNOW; 1577 1578 /* 1579 * Return any desired output. 1580 */ 1581 if (needoutput || (tp->t_flags & TF_ACKNOW)) 1582 (void) tcp_output(tp); 1583 return; 1584 1585dropafterack: 1586 /* 1587 * Generate an ACK dropping incoming segment if it occupies 1588 * sequence space, where the ACK reflects our state. 1589 */ 1590 if (tiflags & TH_RST) 1591 goto drop; 1592#ifdef TCPDEBUG 1593 if (so->so_options & SO_DEBUG) 1594 tcp_trace(TA_DROP, ostate, tp, &tcp_saveti, 0); 1595#endif 1596 m_freem(m); 1597 tp->t_flags |= TF_ACKNOW; 1598 (void) tcp_output(tp); 1599 return; 1600 1601dropwithreset: 1602 /* 1603 * Generate a RST, dropping incoming segment. 1604 * Make ACK acceptable to originator of segment. 1605 * Don't bother to respond if destination was broadcast/multicast. 
1606 */ 1607 if ((tiflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST) || 1608 IN_MULTICAST(ntohl(ti->ti_dst.s_addr))) 1609 goto drop; 1610#ifdef TCPDEBUG 1611 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 1612 tcp_trace(TA_DROP, ostate, tp, &tcp_saveti, 0); 1613#endif 1614 if (tiflags & TH_ACK) 1615 tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST); 1616 else { 1617 if (tiflags & TH_SYN) 1618 ti->ti_len++; 1619 tcp_respond(tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0, 1620 TH_RST|TH_ACK); 1621 } 1622 /* destroy temporarily created socket */ 1623 if (dropsocket) 1624 (void) soabort(so); 1625 return; 1626 1627drop: 1628 /* 1629 * Drop space held by incoming segment and return. 1630 */ 1631#ifdef TCPDEBUG 1632 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 1633 tcp_trace(TA_DROP, ostate, tp, &tcp_saveti, 0); 1634#endif 1635 m_freem(m); 1636 /* destroy temporarily created socket */ 1637 if (dropsocket) 1638 (void) soabort(so); 1639 return; 1640#ifndef TUBA_INCLUDE 1641} 1642 1643void 1644tcp_dooptions(tp, cp, cnt, ti, to) 1645 struct tcpcb *tp; 1646 u_char *cp; 1647 int cnt; 1648 struct tcpiphdr *ti; 1649 struct tcpopt *to; 1650{ 1651 u_short mss = 0; 1652 int opt, optlen; 1653 1654 for (; cnt > 0; cnt -= optlen, cp += optlen) { 1655 opt = cp[0]; 1656 if (opt == TCPOPT_EOL) 1657 break; 1658 if (opt == TCPOPT_NOP) 1659 optlen = 1; 1660 else { 1661 optlen = cp[1]; 1662 if (optlen <= 0) 1663 break; 1664 } 1665 switch (opt) { 1666 1667 default: 1668 continue; 1669 1670 case TCPOPT_MAXSEG: 1671 if (optlen != TCPOLEN_MAXSEG) 1672 continue; 1673 if (!(ti->ti_flags & TH_SYN)) 1674 continue; 1675 bcopy((char *) cp + 2, (char *) &mss, sizeof(mss)); 1676 NTOHS(mss); 1677 break; 1678 1679 case TCPOPT_WINDOW: 1680 if (optlen != TCPOLEN_WINDOW) 1681 continue; 1682 if (!(ti->ti_flags & TH_SYN)) 1683 continue; 1684 tp->t_flags |= TF_RCVD_SCALE; 1685 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); 1686 break; 1687 1688 case 
TCPOPT_TIMESTAMP: 1689 if (optlen != TCPOLEN_TIMESTAMP) 1690 continue; 1691 to->to_flag |= TOF_TS; 1692 bcopy((char *)cp + 2, 1693 (char *)&to->to_tsval, sizeof(to->to_tsval)); 1694 NTOHL(to->to_tsval); 1695 bcopy((char *)cp + 6, 1696 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 1697 NTOHL(to->to_tsecr); 1698 1699 /* 1700 * A timestamp received in a SYN makes 1701 * it ok to send timestamp requests and replies. 1702 */ 1703 if (ti->ti_flags & TH_SYN) { 1704 tp->t_flags |= TF_RCVD_TSTMP; 1705 tp->ts_recent = to->to_tsval; 1706 tp->ts_recent_age = tcp_now; 1707 } 1708 break; 1709 case TCPOPT_CC: 1710 if (optlen != TCPOLEN_CC) 1711 continue; 1712 to->to_flag |= TCPOPT_CC; 1713 bcopy((char *)cp + 2, 1714 (char *)&to->to_cc, sizeof(to->to_cc)); 1715 NTOHL(to->to_cc); 1716 /* 1717 * A CC or CC.new option received in a SYN makes 1718 * it ok to send CC in subsequent segments. 1719 */ 1720 if (ti->ti_flags & TH_SYN) 1721 tp->t_flags |= TF_RCVD_CC; 1722 break; 1723 case TCPOPT_CCNEW: 1724 if (optlen != TCPOLEN_CC) 1725 continue; 1726 if (!(ti->ti_flags & TH_SYN)) 1727 continue; 1728 to->to_flag |= TOF_CCNEW; 1729 bcopy((char *)cp + 2, 1730 (char *)&to->to_cc, sizeof(to->to_cc)); 1731 NTOHL(to->to_cc); 1732 /* 1733 * A CC or CC.new option received in a SYN makes 1734 * it ok to send CC in subsequent segments. 1735 */ 1736 tp->t_flags |= TF_RCVD_CC; 1737 break; 1738 case TCPOPT_CCECHO: 1739 if (optlen != TCPOLEN_CC) 1740 continue; 1741 if (!(ti->ti_flags & TH_SYN)) 1742 continue; 1743 to->to_flag |= TOF_CCECHO; 1744 bcopy((char *)cp + 2, 1745 (char *)&to->to_ccecho, sizeof(to->to_ccecho)); 1746 NTOHL(to->to_ccecho); 1747 break; 1748 } 1749 } 1750 if (ti->ti_flags & TH_SYN) 1751 tcp_mss(tp, mss); /* sets t_maxseg */ 1752} 1753 1754/* 1755 * Pull out of band byte out of a segment so 1756 * it doesn't appear in the user's data queue. 1757 * It is still reflected in the segment length for 1758 * sequencing purposes. 
 */
void
tcp_pulloutofband(so, ti, m)
	struct socket *so;
	struct tcpiphdr *ti;
	register struct mbuf *m;
{
	/* The urgent byte sits ti_urp - 1 bytes into the segment's data. */
	int cnt = ti->ti_urp - 1;

	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			/*
			 * Stash the byte in the pcb and mark it present;
			 * presumably consumed later by the out-of-band
			 * receive path (not in view here).
			 */
			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			/* Close the gap left by the removed OOB byte. */
			bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
			m->m_len--;
			return;
		}
		/* Byte not in this mbuf; skip to the next one. */
		cnt -= m->m_len;
		m = m->m_next;
		if (m == 0)
			break;
	}
	/* The urgent pointer ran past the data actually in the chain. */
	panic("tcp_pulloutofband");
}

/*
 * Collect new round-trip time estimate
 * and update averages and current timeout.
 * rtt is the measured round trip in slow-timeout ticks, origin 1
 * (see the "Adjust rtt to origin 0" note below).
 */
void
tcp_xmit_timer(tp, rtt)
	register struct tcpcb *tp;
	short rtt;
{
	register short delta;

	tcpstat.tcps_rttupdated++;
	if (tp->t_srtt != 0) {
		/*
		 * srtt is stored as fixed point with 3 bits after the
		 * binary point (i.e., scaled by 8).  The following magic
		 * is equivalent to the smoothing algorithm in rfc793 with
		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
		 * point).  Adjust rtt to origin 0.
		 */
		delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
		/* Keep srtt strictly positive; 0 means "no measurement". */
		if ((tp->t_srtt += delta) <= 0)
			tp->t_srtt = 1;
		/*
		 * We accumulate a smoothed rtt variance (actually, a
		 * smoothed mean difference), then set the retransmit
		 * timer to smoothed rtt + 4 times the smoothed variance.
		 * rttvar is stored as fixed point with 2 bits after the
		 * binary point (scaled by 4).  The following is
		 * equivalent to rfc793 smoothing with an alpha of .75
		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
		 * rfc793's wired-in beta.
		 */
		if (delta < 0)
			delta = -delta;
		delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
		if ((tp->t_rttvar += delta) <= 0)
			tp->t_rttvar = 1;
	} else {
		/*
		 * No rtt measurement yet - use the unsmoothed rtt.
		 * Set the variance to half the rtt (so our first
		 * retransmit happens at 3*rtt).
		 */
		tp->t_srtt = rtt << TCP_RTT_SHIFT;
		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
	}
	/* This round trip is complete: reset timing state and backoff. */
	tp->t_rtt = 0;
	tp->t_rxtshift = 0;

	/*
	 * the retransmit should happen at rtt + 4 * rttvar.
	 * Because of the way we do the smoothing, srtt and rttvar
	 * will each average +1/2 tick of bias.  When we compute
	 * the retransmit timer, we want 1/2 tick of rounding and
	 * 1 extra tick because of +-1/2 tick uncertainty in the
	 * firing of the timer.  The bias will give us exactly the
	 * 1.5 tick we need.  But, because the bias is
	 * statistical, we have to test that we don't drop below
	 * the minimum feasible timer (which is 2 ticks).
	 */
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    tp->t_rttmin, TCPTV_REXMTMAX);

	/*
	 * We received an ack for a packet that wasn't retransmitted;
	 * it is probably safe to discard any error indications we've
	 * received recently.  This isn't quite right, but close enough
	 * for now (a route might have failed after we sent a segment,
	 * and the return path might not be symmetrical).
	 */
	tp->t_softerror = 0;
}

/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, check route for mtu.
 * If none, use an mss that can be handled on the outgoing
 * interface without forcing IP to fragment; if bigger than
 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
 * to utilize large mbufs.  If no route is found, route has no mtu,
 * or the destination isn't local, use a default, hopefully conservative
 * size (usually 512 or the default IP max size, but no more than the mtu
 * of the interface), as we can't discover anything about intervening
 * gateways or networks.
 * We also initialize the congestion/slow start
 * window to be a single segment if the destination isn't local.
 * While looking at the routing entry, we also initialize other path-dependent
 * parameters from pre-set or cached values in the routing entry.
 *
 * Also take into account the space needed for options that we
 * send regularly.  Make maxseg shorter by that amount to assure
 * that we can send maxseg amount of data even when the options
 * are present.  Store the upper limit of the length of options plus
 * data in maxopd.
 *
 * NOTE that this routine is only called when we process an incoming
 * segment, for outgoing segments only tcp_mssopt is called.
 *
 * In case of T/TCP, we call this routine during implicit connection
 * setup as well (offer = -1), to initialize maxseg from the cached
 * MSS of our peer.
 */
void
tcp_mss(tp, offer)
	struct tcpcb *tp;
	int offer;
{
	register struct rtentry *rt;
	struct ifnet *ifp;
	register int rtt, mss;
	u_long bufsize;
	struct inpcb *inp;
	struct socket *so;
	struct rmxp_tao *taop;
	int origoffer = offer;

	inp = tp->t_inpcb;
	/* No route: nothing to learn from, fall back to the default MSS. */
	if ((rt = tcp_rtlookup(inp)) == NULL) {
		tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
		return;
	}
	ifp = rt->rt_ifp;
	so = inp->inp_socket;

	taop = rmx_taop(rt->rt_rmx);
	/*
	 * Offer == -1 means that we didn't receive SYN yet,
	 * use cached value in that case;
	 */
	if (offer == -1)
		offer = taop->tao_mssopt;
	/*
	 * Offer == 0 means that there was no MSS on the SYN segment,
	 * in this case we use tcp_mssdflt.
	 */
	if (offer == 0)
		offer = tcp_mssdflt;
	else
		/*
		 * Sanity check: make sure that maxopd will be large
		 * enough to allow some data on segments even if all
		 * the option space is used (40 bytes).  Otherwise
		 * funny things may happen in tcp_output.
		 */
		offer = max(offer, 64);
	taop->tao_mssopt = offer;

#ifdef RTV_MTU	/* if route characteristics exist ... */
	/*
	 * While we're here, check if there's an initial rtt
	 * or rttvar.  Convert from the route-table units
	 * to scaled multiples of the slow timeout timer.
	 */
	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
		/*
		 * XXX the lock bit for RTT indicates that the value
		 * is also a minimum value; this is subject to time.
		 */
		if (rt->rt_rmx.rmx_locks & RTV_RTT)
			tp->t_rttmin = rtt / (RTM_RTTUNIT / PR_SLOWHZ);
		tp->t_srtt = rtt / (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
		if (rt->rt_rmx.rmx_rttvar)
			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
		else
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	/*
	 * if there's an mtu associated with the route, use it
	 */
	if (rt->rt_rmx.rmx_mtu)
		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
	else
#endif /* RTV_MTU */
	{
		mss = ifp->if_mtu - sizeof(struct tcpiphdr);
		if (!in_localaddr(inp->inp_faddr))
			mss = min(mss, tcp_mssdflt);
	}
	/* Never exceed what the peer offered (or the cached/default guess). */
	mss = min(mss, offer);
	/*
	 * maxopd stores the maximum length of data AND options
	 * in a segment; maxseg is the amount of data in a normal
	 * segment.  We need to store this value (maxopd) apart
	 * from maxseg, because now every segment carries options
	 * and thus we normally have somewhat less data in segments.
	 */
	tp->t_maxopd = mss;

	/*
	 * In case of T/TCP, origoffer==-1 indicates, that no segments
	 * were received yet.  In this case we just guess, otherwise
	 * we do the same as before T/TCP.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (origoffer == -1 ||
	     (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
		mss -= TCPOLEN_TSTAMP_APPA;
	if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
	    (origoffer == -1 ||
	     (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC))
		mss -= TCPOLEN_CC_APPA;

	/* Round down to a multiple of MCLBYTES so clusters pack fully. */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
	if (mss > MCLBYTES)
		mss &= ~(MCLBYTES-1);
#else
	if (mss > MCLBYTES)
		mss = mss / MCLBYTES * MCLBYTES;
#endif
	/*
	 * If there's a pipesize, change the socket buffer
	 * to that size.  Make the socket buffers an integral
	 * number of mss units; if the mss is larger than
	 * the socket buffer, decrease the mss.
	 *
	 * Note: when RTV_SPIPE/RTV_RPIPE are not defined, the
	 * assignments below become unconditional by design.
	 */
#ifdef RTV_SPIPE
	if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
#endif
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void)sbreserve(&so->so_snd, bufsize);
	}
	tp->t_maxseg = mss;

#ifdef RTV_RPIPE
	if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
#endif
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void)sbreserve(&so->so_rcv, bufsize);
	}
	/*
	 * Don't force slow-start on local network.
	 * (Non-local destination: start with a one-segment window.)
	 */
	if (!in_localaddr(inp->inp_faddr))
		tp->snd_cwnd = mss;

#ifdef RTV_SSTHRESH
	if (rt->rt_rmx.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
	}
#endif
}

/*
 * Determine the MSS option to send on an outgoing SYN.
2053 */ 2054int 2055tcp_mssopt(tp) 2056 struct tcpcb *tp; 2057{ 2058 struct rtentry *rt; 2059 2060 rt = tcp_rtlookup(tp->t_inpcb); 2061 if (rt == NULL) 2062 return tcp_mssdflt; 2063 2064 /* 2065 * if there's an mtu associated with the route, use it 2066 */ 2067 if (rt->rt_rmx.rmx_mtu) 2068 return rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr); 2069 2070 return rt->rt_ifp->if_mtu - sizeof(struct tcpiphdr); 2071} 2072#endif /* TUBA_INCLUDE */ 2073