tcp_input.c revision 171605
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
30 * $FreeBSD: head/sys/netinet/tcp_input.c 171605 2007-07-27 00:57:06Z silby $
31 */
32
33#include "opt_ipfw.h"		/* for ipfw_fwd	*/
34#include "opt_inet.h"
35#include "opt_inet6.h"
36#include "opt_ipsec.h"
37#include "opt_mac.h"
38#include "opt_tcpdebug.h"
39
40#include <sys/param.h>
41#include <sys/kernel.h>
42#include <sys/malloc.h>
43#include <sys/mbuf.h>
44#include <sys/proc.h>		/* for proc0 declaration */
45#include <sys/protosw.h>
46#include <sys/signalvar.h>
47#include <sys/socket.h>
48#include <sys/socketvar.h>
49#include <sys/sysctl.h>
50#include <sys/syslog.h>
51#include <sys/systm.h>
52
53#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */
54
55#include <vm/uma.h>
56
57#include <net/if.h>
58#include <net/route.h>
59
60#include <netinet/in.h>
61#include <netinet/in_pcb.h>
62#include <netinet/in_systm.h>
63#include <netinet/in_var.h>
64#include <netinet/ip.h>
65#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
66#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
67#include <netinet/ip_var.h>
68#include <netinet/ip_options.h>
69#include <netinet/ip6.h>
70#include <netinet/icmp6.h>
71#include <netinet6/in6_pcb.h>
72#include <netinet6/ip6_var.h>
73#include <netinet6/nd6.h>
74#include <netinet/tcp.h>
75#include <netinet/tcp_fsm.h>
76#include <netinet/tcp_seq.h>
77#include <netinet/tcp_timer.h>
78#include <netinet/tcp_var.h>
79#include <netinet6/tcp6_var.h>
80#include <netinet/tcpip.h>
81#include <netinet/tcp_syncache.h>
82#ifdef TCPDEBUG
83#include <netinet/tcp_debug.h>
84#endif /* TCPDEBUG */
85
86#ifdef IPSEC
87#include <netipsec/ipsec.h>
88#include <netipsec/ipsec6.h>
89#endif /*IPSEC*/
90
91#include <machine/in_cksum.h>
92
93#include <security/mac/mac_framework.h>
94
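/* Number of consecutive duplicate ACKs that triggers fast retransmit. */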
95static const int tcprexmtthresh = 3;
96
97struct	tcpstat tcpstat;
98SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
99    &tcpstat, tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
100
101static int tcp_log_in_vain = 0;
102SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
103    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");
104
105static int blackhole = 0;
106SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
107    &blackhole, 0, "Do not send RST on segments to closed ports");
108
109int tcp_delack_enabled = 1;
110SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
111    &tcp_delack_enabled, 0,
112    "Delay ACK to try and piggyback it onto a data packet");
113
114static int drop_synfin = 0;
115SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
116    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
117
118static int tcp_do_rfc3042 = 1;
119SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
120    &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");
121
122static int tcp_do_rfc3390 = 1;
123SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
124    &tcp_do_rfc3390, 0,
125    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
126
127static int tcp_insecure_rst = 0;
128SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
129    &tcp_insecure_rst, 0,
130    "Follow the old (insecure) criteria for accepting RST packets");
131
132int	tcp_do_autorcvbuf = 1;
133SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
134    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");
135
136int	tcp_autorcvbuf_inc = 16*1024;
137SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
138    &tcp_autorcvbuf_inc, 0,
139    "Incrementor step size of automatic receive buffer");
140
141int	tcp_autorcvbuf_max = 256*1024;
142SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
143    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");
144
145struct inpcbhead tcb;
146#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
147struct inpcbinfo tcbinfo;
148
149static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
150static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
151		     struct socket *, struct tcpcb *, int, int);
152static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
153		     struct tcpcb *, int, int);
154static void	 tcp_pulloutofband(struct socket *,
155		     struct tcphdr *, struct mbuf *, int);
156static void	 tcp_xmit_timer(struct tcpcb *, int);
157static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
158
159/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
160#ifdef INET6
161#define ND6_HINT(tp) \
162do { \
163	if ((tp) && (tp)->t_inpcb && \
164	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
165		nd6_nud_hint(NULL, NULL, 0); \
166} while (0)
167#else
168#define ND6_HINT(tp)
169#endif
170
171/*
172 * Indicate whether this ack should be delayed.  We can delay the ack if
173 *	- there is no delayed ack timer in progress and
174 *	- our last ack didn't advertise a 0-sized window.  We never want to
175 *	  delay the ack that opens up a 0-sized window and
176 *		- delayed acks are enabled or
177 *		- this is a half-synchronized T/TCP connection.
178 */
179#define DELAY_ACK(tp)							\
180	((!tcp_timer_active(tp, TT_DELACK) &&				\
181	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
182	    (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
183
184
185/*
186 * TCP input handling is split into multiple parts:
187 *   tcp6_input is a thin wrapper around tcp_input for the extended
188 *	ip6_protox[] call format in ip6_input
189 *   tcp_input handles primary segment validation, inpcb lookup and
190 *	SYN processing on listen sockets
191 *   tcp_do_segment processes the ACK and text of the segment for
192 *	establishing, established and closing connections
193 */
194#ifdef INET6
195int
196tcp6_input(struct mbuf **mp, int *offp, int proto)
197{
198	struct mbuf *m = *mp;
199	struct in6_ifaddr *ia6;
200
201	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
202
203	/*
204	 * draft-itojun-ipv6-tcp-to-anycast
205	 * Is there a better place to put this?
206	 */
207	ia6 = ip6_getdstifaddr(m);
208	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
209		struct ip6_hdr *ip6;
210
211		ip6 = mtod(m, struct ip6_hdr *);
212		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
213			    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
214		return IPPROTO_DONE;
215	}
216
217	tcp_input(m, *offp);
218	return IPPROTO_DONE;
219}
220#endif
221
222void
223tcp_input(struct mbuf *m, int off0)
224{
225	struct tcphdr *th;
226	struct ip *ip = NULL;
227	struct ipovly *ipov;
228	struct inpcb *inp = NULL;
229	struct tcpcb *tp = NULL;
230	struct socket *so = NULL;
231	u_char *optp = NULL;
232	int optlen = 0;
233	int len, tlen, off;
234	int drop_hdrlen;
235	int thflags;
236	int rstreason = 0;	/* For badport_bandlim accounting purposes */
237#ifdef IPFIREWALL_FORWARD
238	struct m_tag *fwd_tag;
239#endif
240#ifdef INET6
241	struct ip6_hdr *ip6 = NULL;
242	int isipv6;
243#else
244	const void *ip6 = NULL;
245	const int isipv6 = 0;
246#endif
247	struct tcpopt to;		/* options in this segment */
248	char *s = NULL;			/* address and port logging */
249
250#ifdef TCPDEBUG
251	/*
252	 * The size of tcp_saveipgen must be the size of the max ip header,
253	 * now IPv6.
254	 */
255	u_char tcp_saveipgen[IP6_HDR_LEN];
256	struct tcphdr tcp_savetcp;
257	short ostate = 0;
258#endif
259
260#ifdef INET6
261	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
262#endif
263
264	to.to_flags = 0;
265	tcpstat.tcps_rcvtotal++;
266
267	if (isipv6) {
268#ifdef INET6
269		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
270		ip6 = mtod(m, struct ip6_hdr *);
271		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
272		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
273			tcpstat.tcps_rcvbadsum++;
274			goto drop;
275		}
276		th = (struct tcphdr *)((caddr_t)ip6 + off0);
277
278		/*
279		 * Be proactive about unspecified IPv6 address in source.
280		 * As we use all-zeros to indicate an unbound/unconnected pcb,
281		 * an unspecified IPv6 source address can be used to confuse us.
282		 *
283		 * Note that packets with an unspecified IPv6 destination are
284		 * already dropped in ip6_input.
285		 */
286		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
287			/* XXX stat */
288			goto drop;
289		}
290#else
291		th = NULL;		/* XXX: Avoid compiler warning. */
292#endif
293	} else {
294		/*
295		 * Get IP and TCP header together in first mbuf.
296		 * Note: IP leaves IP header in first mbuf.
297		 */
298		if (off0 > sizeof (struct ip)) {
299			ip_stripoptions(m, (struct mbuf *)0);
300			off0 = sizeof(struct ip);
301		}
302		if (m->m_len < sizeof (struct tcpiphdr)) {
303			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
304			    == NULL) {
305				tcpstat.tcps_rcvshort++;
306				return;
307			}
308		}
309		ip = mtod(m, struct ip *);
310		ipov = (struct ipovly *)ip;
311		th = (struct tcphdr *)((caddr_t)ip + off0);
312		tlen = ip->ip_len;
313
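		/*
		 * If the NIC verified the checksum (CSUM_DATA_VALID),
		 * finish the job by folding in the pseudo-header when
		 * needed; otherwise compute the full checksum in
		 * software.  A non-zero th_sum afterwards means the
		 * segment is corrupt and is dropped below.
		 */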
314		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
315			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
316				th->th_sum = m->m_pkthdr.csum_data;
317			else
318				th->th_sum = in_pseudo(ip->ip_src.s_addr,
319						ip->ip_dst.s_addr,
320						htonl(m->m_pkthdr.csum_data +
321							ip->ip_len +
322							IPPROTO_TCP));
323			th->th_sum ^= 0xffff;
324#ifdef TCPDEBUG
325			ipov->ih_len = (u_short)tlen;
326			ipov->ih_len = htons(ipov->ih_len);
327#endif
328		} else {
329			/*
330			 * Checksum extended TCP header and data.
331			 */
332			len = sizeof (struct ip) + tlen;
333			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
334			ipov->ih_len = (u_short)tlen;
335			ipov->ih_len = htons(ipov->ih_len);
336			th->th_sum = in_cksum(m, len);
337		}
338		if (th->th_sum) {
339			tcpstat.tcps_rcvbadsum++;
340			goto drop;
341		}
342		/* Re-initialization for later version check */
343		ip->ip_v = IPVERSION;
344	}
345
346	/*
347	 * Check that TCP offset makes sense,
348	 * pull out TCP options and adjust length.		XXX
349	 */
350	off = th->th_off << 2;
351	if (off < sizeof (struct tcphdr) || off > tlen) {
352		tcpstat.tcps_rcvbadoff++;
353		goto drop;
354	}
355	tlen -= off;	/* tlen is used instead of ti->ti_len */
356	if (off > sizeof (struct tcphdr)) {
357		if (isipv6) {
358#ifdef INET6
359			IP6_EXTHDR_CHECK(m, off0, off, );
360			ip6 = mtod(m, struct ip6_hdr *);
361			th = (struct tcphdr *)((caddr_t)ip6 + off0);
362#endif
363		} else {
364			if (m->m_len < sizeof(struct ip) + off) {
365				if ((m = m_pullup(m, sizeof (struct ip) + off))
366				    == NULL) {
367					tcpstat.tcps_rcvshort++;
368					return;
369				}
370				ip = mtod(m, struct ip *);
371				ipov = (struct ipovly *)ip;
372				th = (struct tcphdr *)((caddr_t)ip + off0);
373			}
374		}
375		optlen = off - sizeof (struct tcphdr);
376		optp = (u_char *)(th + 1);
377	}
378	thflags = th->th_flags;
379
380	/*
381	 * Convert TCP protocol specific fields to host format.
382	 */
383	th->th_seq = ntohl(th->th_seq);
384	th->th_ack = ntohl(th->th_ack);
385	th->th_win = ntohs(th->th_win);
386	th->th_urp = ntohs(th->th_urp);
387
388	/*
389	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
390	 */
391	drop_hdrlen = off0 + off;
392
393	/*
394	 * Locate pcb for segment.
395	 */
396	INP_INFO_WLOCK(&tcbinfo);
397findpcb:
398	INP_INFO_WLOCK_ASSERT(&tcbinfo);
399#ifdef IPFIREWALL_FORWARD
400	/*
401	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
402	 */
403	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
404
405	if (fwd_tag != NULL && isipv6 == 0) {	/* No IPv6 support yet. */
406		struct sockaddr_in *next_hop;
407
408		next_hop = (struct sockaddr_in *)(fwd_tag+1);
409		/*
410		 * Transparently forwarded. Pretend to be the destination.
411		 * already got one like this?
412		 */
413		inp = in_pcblookup_hash(&tcbinfo,
414					ip->ip_src, th->th_sport,
415					ip->ip_dst, th->th_dport,
416					0, m->m_pkthdr.rcvif);
417		if (!inp) {
418			/* It's new.  Try to find the ambushing socket. */
419			inp = in_pcblookup_hash(&tcbinfo,
420						ip->ip_src, th->th_sport,
421						next_hop->sin_addr,
422						next_hop->sin_port ?
423						    ntohs(next_hop->sin_port) :
424						    th->th_dport,
425						INPLOOKUP_WILDCARD,
426						m->m_pkthdr.rcvif);
427		}
428		/* Remove the tag from the packet.  We don't need it anymore. */
429		m_tag_delete(m, fwd_tag);
430	} else
431#endif /* IPFIREWALL_FORWARD */
432	{
433		if (isipv6) {
434#ifdef INET6
435			inp = in6_pcblookup_hash(&tcbinfo,
436						 &ip6->ip6_src, th->th_sport,
437						 &ip6->ip6_dst, th->th_dport,
438						 INPLOOKUP_WILDCARD,
439						 m->m_pkthdr.rcvif);
440#endif
441		} else
442			inp = in_pcblookup_hash(&tcbinfo,
443						ip->ip_src, th->th_sport,
444						ip->ip_dst, th->th_dport,
445						INPLOOKUP_WILDCARD,
446						m->m_pkthdr.rcvif);
447	}
448
449#ifdef IPSEC
450#ifdef INET6
451	if (isipv6 && inp != NULL && ipsec6_in_reject(m, inp)) {
452		ipsec6stat.in_polvio++;
453		goto dropunlock;
454	} else
455#endif /* INET6 */
456	if (inp != NULL && ipsec4_in_reject(m, inp)) {
457		ipsec4stat.in_polvio++;
458		goto dropunlock;
459	}
460#endif /* IPSEC */
461
462	/*
463	 * If the INPCB does not exist then all data in the incoming
464	 * segment is discarded and an appropriate RST is sent back.
465	 */
466	if (inp == NULL) {
467		/*
468		 * Log communication attempts to ports that are not
469		 * in use.
470		 */
471		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
472		    tcp_log_in_vain == 2) {
473			if ((s = tcp_log_addrs(NULL, th, (void *)ip, ip6)))
474				log(LOG_INFO, "%s; %s: Connection attempt "
475				    "to closed port\n", s, __func__);
476		}
477		/*
478		 * When blackholing do not respond with a RST but
479		 * completely ignore the segment and drop it.
480		 */
481		if ((blackhole == 1 && (thflags & TH_SYN)) ||
482		    blackhole == 2)
483			goto dropunlock;
484
485		rstreason = BANDLIM_RST_CLOSEDPORT;
486		goto dropwithreset;
487	}
488	INP_LOCK(inp);
489
490	/*
491	 * Check the minimum TTL for socket.
492	 */
493	if (inp->inp_ip_minttl != 0) {
494#ifdef INET6
495		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
496			goto dropunlock;
497		else
498#endif
499		if (inp->inp_ip_minttl > ip->ip_ttl)
500			goto dropunlock;
501	}
502
503	/*
504	 * A previous connection in TIMEWAIT state is supposed to catch
505	 * stray or duplicate segments arriving late.  If this segment
506	 * was a legitimate new connection attempt the old INPCB gets
507	 * removed and we can try again to find a listening socket.
508	 */
509	if (inp->inp_vflag & INP_TIMEWAIT) {
510		if (thflags & TH_SYN)
511			tcp_dooptions(&to, optp, optlen, TO_SYN);
512		/*
513		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
514		 */
515		if (tcp_twcheck(inp, &to, th, m, tlen))
516			goto findpcb;
517		INP_INFO_WUNLOCK(&tcbinfo);
518		return;
519	}
520	/*
521	 * The TCPCB may no longer exist if the connection is winding
522	 * down or it is in the CLOSED state.  Either way we drop the
523	 * segment and send an appropriate response.
524	 */
525	tp = intotcpcb(inp);
526	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
527		rstreason = BANDLIM_RST_CLOSEDPORT;
528		goto dropwithreset;
529	}
530
531#ifdef MAC
532	INP_LOCK_ASSERT(inp);
533	if (mac_check_inpcb_deliver(inp, m))
534		goto dropunlock;
535#endif
536	so = inp->inp_socket;
537	KASSERT(so != NULL, ("%s: so == NULL", __func__));
538#ifdef TCPDEBUG
539	if (so->so_options & SO_DEBUG) {
540		ostate = tp->t_state;
541		if (isipv6) {
542#ifdef INET6
543			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
544#endif
545		} else
546			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
547		tcp_savetcp = *th;
548	}
549#endif
550	/*
551	 * When the socket is accepting connections (the INPCB is in LISTEN
552	 * state) we look into the SYN cache if this is a new connection
553	 * attempt or the completion of a previous one.
554	 */
555	if (so->so_options & SO_ACCEPTCONN) {
556		struct in_conninfo inc;
557
558		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
559		    "tp not listening", __func__));
560
561		bzero(&inc, sizeof(inc));
562		inc.inc_isipv6 = isipv6;
563#ifdef INET6
564		if (isipv6) {
565			inc.inc6_faddr = ip6->ip6_src;
566			inc.inc6_laddr = ip6->ip6_dst;
567		} else
568#endif
569		{
570			inc.inc_faddr = ip->ip_src;
571			inc.inc_laddr = ip->ip_dst;
572		}
573		inc.inc_fport = th->th_sport;
574		inc.inc_lport = th->th_dport;
575
576		/*
577		 * Check for an existing connection attempt in syncache if
578		 * the flag is only ACK.  A successful lookup creates a new
579		 * socket appended to the listen queue in SYN_RECEIVED state.
580		 */
581		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
582			/*
583			 * Parse the TCP options here because
584			 * syncookies need access to the reflected
585			 * timestamp.
586			 */
587			tcp_dooptions(&to, optp, optlen, 0);
588			/*
589			 * NB: syncache_expand() doesn't unlock
590			 * inp and tcpinfo locks.
591			 */
592			if (!syncache_expand(&inc, &to, th, &so, m)) {
593				/*
594				 * No syncache entry or ACK was not
595				 * for our SYN/ACK.  Send a RST.
596				 * NB: syncache did its own logging
597				 * of the failure cause.
598				 */
599				rstreason = BANDLIM_RST_OPENPORT;
600				goto dropwithreset;
601			}
602			if (so == NULL) {
603				/*
604				 * We completed the 3-way handshake
605				 * but could not allocate a socket
606				 * either due to memory shortage,
607				 * listen queue length limits or
608				 * global socket limits.  Send RST
609				 * or wait and have the remote end
610				 * retransmit the ACK for another
611				 * try.
612				 */
613				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
614					log(LOG_DEBUG, "%s; %s: Listen socket: "
615					    "Socket allocation failed due to "
616					    "limits or memory shortage, %s\n",
617					    s, __func__, (tcp_sc_rst_sock_fail ?
618					    "sending RST" : "try again"));
619				if (tcp_sc_rst_sock_fail) {
620					rstreason = BANDLIM_UNLIMITED;
621					goto dropwithreset;
622				} else
623					goto dropunlock;
624			}
625			/*
626			 * Socket is created in state SYN_RECEIVED.
627			 * Unlock the listen socket, lock the newly
628			 * created socket and update the tp variable.
629			 */
630			INP_UNLOCK(inp);	/* listen socket */
631			inp = sotoinpcb(so);
632			INP_LOCK(inp);		/* new connection */
633			tp = intotcpcb(inp);
634			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
635			    ("%s: ", __func__));
636			/*
637			 * Process the segment and the data it
638			 * contains.  tcp_do_segment() consumes
639			 * the mbuf chain and unlocks the inpcb.
640			 */
641			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
642			INP_INFO_UNLOCK_ASSERT(&tcbinfo);
643			return;
644		}
645		/*
646		 * Segment flag validation for new connection attempts:
647		 *
648		 * Our (SYN|ACK) response was rejected.
649		 * Check with syncache and remove entry to prevent
650		 * retransmits.
651		 */
652		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
653			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
654				log(LOG_DEBUG, "%s; %s: Listen socket: "
655				    "Our SYN|ACK was rejected, connection "
656				    "attempt aborted by remote endpoint\n",
657				    s, __func__);
658			syncache_chkrst(&inc, th);
659			goto dropunlock;
660		}
661		/*
662		 * Spurious RST.  Ignore.
663		 */
664		if (thflags & TH_RST) {
665			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
666				log(LOG_DEBUG, "%s; %s: Listen socket: "
667				    "Spurious RST, segment rejected\n",
668				    s, __func__);
669			goto dropunlock;
670		}
671		/*
672		 * We can't do anything without SYN.
673		 */
674		if ((thflags & TH_SYN) == 0) {
675			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
676				log(LOG_DEBUG, "%s; %s: Listen socket: "
677				    "SYN is missing, segment rejected\n",
678				    s, __func__);
679			tcpstat.tcps_badsyn++;
680			goto dropunlock;
681		}
682		/*
683		 * (SYN|ACK) is bogus on a listen socket.
684		 */
685		if (thflags & TH_ACK) {
686			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
687				log(LOG_DEBUG, "%s; %s: Listen socket: "
688				    "SYN|ACK invalid, segment rejected\n",
689				    s, __func__);
690			syncache_badack(&inc);	/* XXX: Not needed! */
691			tcpstat.tcps_badsyn++;
692			rstreason = BANDLIM_RST_OPENPORT;
693			goto dropwithreset;
694		}
695		/*
696		 * If the drop_synfin option is enabled, drop all
697		 * segments with both the SYN and FIN bits set.
698		 * This prevents e.g. nmap from identifying the
699		 * TCP/IP stack.
700		 * XXX: Poor reasoning.  nmap has other methods
701		 * and is constantly refining its stack detection
702		 * strategies.
703		 * XXX: This is a violation of the TCP specification
704		 * and was used by RFC1644.
705		 */
706		if ((thflags & TH_FIN) && drop_synfin) {
707			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
708				log(LOG_DEBUG, "%s; %s: Listen socket: "
709				    "SYN|FIN segment rejected (based on "
710				    "sysctl setting)\n", s, __func__);
711			tcpstat.tcps_badsyn++;
712			goto dropunlock;
713		}
714		/*
715		 * Segment's flags are (SYN) or (SYN|FIN).
716		 *
717		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
718		 * as they do not affect the state of the TCP FSM.
719		 * The data pointed to by TH_URG and th_urp is ignored.
720		 */
721		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
722		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
723		KASSERT(thflags & (TH_SYN),
724		    ("%s: Listen socket: TH_SYN not set", __func__));
725#ifdef INET6
726		/*
727		 * If deprecated address is forbidden,
728		 * we do not accept SYN to deprecated interface
729		 * address to prevent any new inbound connection from
730		 * getting established.
731		 * When we do not accept SYN, we send a TCP RST,
732		 * with deprecated source address (instead of dropping
733		 * it).  We compromise because it is much better for the peer
734		 * to send a RST, and RST will be the final packet
735		 * for the exchange.
736		 *
737		 * If we do not forbid deprecated addresses, we accept
738		 * the SYN packet.  RFC2462 does not suggest dropping
739		 * SYN in this case.
740		 * If we decipher RFC2462 5.5.4, it says like this:
741		 * 1. use of deprecated addr with existing
742		 *    communication is okay - "SHOULD continue to be
743		 *    used"
744		 * 2. use of it with new communication:
745		 *   (2a) "SHOULD NOT be used if alternate address
746		 *        with sufficient scope is available"
747		 *   (2b) nothing mentioned otherwise.
748		 * Here we fall into (2b) case as we have no choice in
749		 * our source address selection - we must obey the peer.
750		 *
751		 * The wording in RFC2462 is confusing, and there are
752		 * multiple descriptions of deprecated address
753		 * handling - worse, they are not exactly the same.
754		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
755		 */
756		if (isipv6 && !ip6_use_deprecated) {
757			struct in6_ifaddr *ia6;
758
759			if ((ia6 = ip6_getdstifaddr(m)) &&
760			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
761				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
762				    log(LOG_DEBUG, "%s; %s: Listen socket: "
763					"Connection attempt to deprecated "
764					"IPv6 address rejected\n",
765					s, __func__);
766				rstreason = BANDLIM_RST_OPENPORT;
767				goto dropwithreset;
768			}
769		}
770#endif
771		/*
772		 * Basic sanity checks on incoming SYN requests:
773		 *   Don't respond if the destination is a link layer
774		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
775		 *   If it is from this socket it must be forged.
776		 *   Don't respond if the source or destination is a
777		 *	global or subnet broad- or multicast address.
778		 *   Note that it is quite possible to receive unicast
779		 *	link-layer packets with a broadcast IP address. Use
780		 *	in_broadcast() to find them.
781		 */
782		if (m->m_flags & (M_BCAST|M_MCAST)) {
783			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
784			    log(LOG_DEBUG, "%s; %s: Listen socket: "
785				"Connection attempt from broad- or multicast "
786				"link layer address rejected\n", s, __func__);
787			goto dropunlock;
788		}
789		if (isipv6) {
790#ifdef INET6
791			if (th->th_dport == th->th_sport &&
792			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
793				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
794				    log(LOG_DEBUG, "%s; %s: Listen socket: "
795					"Connection attempt to/from self "
796					"rejected\n", s, __func__);
797				goto dropunlock;
798			}
799			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
800			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
801				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
802				    log(LOG_DEBUG, "%s; %s: Listen socket: "
803					"Connection attempt from/to multicast "
804					"address rejected\n", s, __func__);
805				goto dropunlock;
806			}
807#endif
808		} else {
809			if (th->th_dport == th->th_sport &&
810			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
811				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
812				    log(LOG_DEBUG, "%s; %s: Listen socket: "
813					"Connection attempt from/to self "
814					"rejected\n", s, __func__);
815				goto dropunlock;
816			}
817			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
818			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
819			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
820			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
821				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
822				    log(LOG_DEBUG, "%s; %s: Listen socket: "
823					"Connection attempt from/to broad- "
824					"or multicast address rejected\n",
825					s, __func__);
826				goto dropunlock;
827			}
828		}
829		/*
830		 * SYN appears to be valid.  Create compressed TCP state
831		 * for syncache.
832		 */
833#ifdef TCPDEBUG
834		if (so->so_options & SO_DEBUG)
835			tcp_trace(TA_INPUT, ostate, tp,
836			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
837#endif
838		tcp_dooptions(&to, optp, optlen, TO_SYN);
839		syncache_add(&inc, &to, th, inp, &so, m);
840		/*
841		 * Entry added to syncache and mbuf consumed.
842		 * Everything already unlocked by syncache_add().
843		 */
844		INP_INFO_UNLOCK_ASSERT(&tcbinfo);
845		return;
846	}
847
848	/*
849	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
850	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
851	 * the inpcb, and unlocks pcbinfo.
852	 */
853	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
854	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
855	return;
856
857dropwithreset:
858	INP_INFO_WLOCK_ASSERT(&tcbinfo);
859	tcp_dropwithreset(m, th, tp, tlen, rstreason);
860	m = NULL;	/* mbuf chain got consumed. */
861dropunlock:
862	INP_INFO_WLOCK_ASSERT(&tcbinfo);
863	if (inp != NULL)
864		INP_UNLOCK(inp);
865	INP_INFO_WUNLOCK(&tcbinfo);
866drop:
867	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
868	if (s != NULL)
869		free(s, M_TCPLOG);
870	if (m != NULL)
871		m_freem(m);
872	return;
873}
874
875static void
876tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
877    struct tcpcb *tp, int drop_hdrlen, int tlen)
878{
879	int thflags, acked, ourfinisacked, needoutput = 0;
880	int headlocked = 1;
881	int rstreason, todrop, win;
882	u_long tiwin;
883	struct tcpopt to;
884
885#ifdef TCPDEBUG
886	/*
887	 * The size of tcp_saveipgen must be the size of the max ip header,
888	 * now IPv6.
889	 */
890	u_char tcp_saveipgen[IP6_HDR_LEN];
891	struct tcphdr tcp_savetcp;
892	short ostate = 0;
893#endif
894	thflags = th->th_flags;
895
896	INP_INFO_WLOCK_ASSERT(&tcbinfo);
897	INP_LOCK_ASSERT(tp->t_inpcb);
898	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
899	    __func__));
900	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
901	    __func__));
902
903	/*
904	 * Segment received on connection.
905	 * Reset idle time and keep-alive timer.
906	 * XXX: This should be done after segment
907	 * validation to ignore broken/spoofed segs.
908	 */
909	tp->t_rcvtime = ticks;
910	if (TCPS_HAVEESTABLISHED(tp->t_state))
911		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
912
913	/*
914	 * Unscale the window into a 32-bit value.
915	 * For the SYN_SENT state the scale is zero.
916	 */
917	tiwin = th->th_win << tp->snd_scale;
918
919	/*
920	 * Parse options on any incoming segment.
921	 */
922	tcp_dooptions(&to, (u_char *)(th + 1),
923	    (th->th_off << 2) - sizeof(struct tcphdr),
924	    (thflags & TH_SYN) ? TO_SYN : 0);
925
926	/*
927	 * If echoed timestamp is later than the current time,
928	 * fall back to non RFC1323 RTT calculation.  Normalize
929	 * timestamp if syncookies were used when this connection
930	 * was established.
931	 */
932	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
933		to.to_tsecr -= tp->ts_offset;
934		if (TSTMP_GT(to.to_tsecr, ticks))
935			to.to_tsecr = 0;
936	}
937
938	/*
939	 * Process options only when we get SYN/ACK back. The SYN case
940	 * for incoming connections is handled in tcp_syncache.
941	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
942	 * or <SYN,ACK>) segment itself is never scaled.
943	 * XXX this is traditional behavior, may need to be cleaned up.
944	 */
945	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
946		if ((to.to_flags & TOF_SCALE) &&
947		    (tp->t_flags & TF_REQ_SCALE)) {
948			tp->t_flags |= TF_RCVD_SCALE;
949			tp->snd_scale = to.to_wscale;
950		}
951		/*
952		 * Initial send window.  It will be updated with
953		 * the next incoming segment to the scaled value.
954		 */
955		tp->snd_wnd = th->th_win;
956		if (to.to_flags & TOF_TS) {
957			tp->t_flags |= TF_RCVD_TSTMP;
958			tp->ts_recent = to.to_tsval;
959			tp->ts_recent_age = ticks;
960		}
961		if (to.to_flags & TOF_MSS)
962			tcp_mss(tp, to.to_mss);
963		if ((tp->t_flags & TF_SACK_PERMIT) &&
964		    (to.to_flags & TOF_SACKPERM) == 0)
965			tp->t_flags &= ~TF_SACK_PERMIT;
966	}
967
968	/*
969	 * Header prediction: check for the two common cases
970	 * of a uni-directional data xfer.  If the packet has
971	 * no control flags, is in-sequence, the window didn't
972	 * change and we're not retransmitting, it's a
973	 * candidate.  If the length is zero and the ack moved
974	 * forward, we're the sender side of the xfer.  Just
975	 * free the data acked & wake any higher level process
976	 * that was blocked waiting for space.  If the length
977	 * is non-zero and the ack didn't move, we're the
978	 * receiver side.  If we're getting packets in-order
979	 * (the reassembly queue is empty), add the data to
980	 * the socket buffer and note that we need a delayed ack.
981	 * Make sure that the hidden state-flags are also off.
982	 * Since we check for TCPS_ESTABLISHED first, it can only
983	 * be TH_NEEDSYN.
984	 */
985	if (tp->t_state == TCPS_ESTABLISHED &&
986	    th->th_seq == tp->rcv_nxt &&
987	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
988	    tp->snd_nxt == tp->snd_max &&
989	    tiwin && tiwin == tp->snd_wnd &&
990	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
991	    LIST_EMPTY(&tp->t_segq) &&
992	    ((to.to_flags & TOF_TS) == 0 ||
993	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
994
995		/*
996		 * If last ACK falls within this segment's sequence numbers,
997		 * record the timestamp.
998		 * NOTE that the test is modified according to the latest
999		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1000		 */
1001		if ((to.to_flags & TOF_TS) != 0 &&
1002		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1003			tp->ts_recent_age = ticks;
1004			tp->ts_recent = to.to_tsval;
1005		}
1006
1007		if (tlen == 0) {
1008			if (SEQ_GT(th->th_ack, tp->snd_una) &&
1009			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
1010			    tp->snd_cwnd >= tp->snd_wnd &&
1011			    ((!tcp_do_newreno &&
1012			      !(tp->t_flags & TF_SACK_PERMIT) &&
1013			      tp->t_dupacks < tcprexmtthresh) ||
1014			     ((tcp_do_newreno ||
1015			       (tp->t_flags & TF_SACK_PERMIT)) &&
1016			      !IN_FASTRECOVERY(tp) &&
1017			      (to.to_flags & TOF_SACK) == 0 &&
1018			      TAILQ_EMPTY(&tp->snd_holes)))) {
1019				KASSERT(headlocked,
1020				    ("%s: headlocked", __func__));
1021				INP_INFO_WUNLOCK(&tcbinfo);
1022				headlocked = 0;
1023				/*
1024				 * This is a pure ack for outstanding data.
1025				 */
1026				++tcpstat.tcps_predack;
1027				/*
1028				 * "bad retransmit" recovery.
1029				 */
1030				if (tp->t_rxtshift == 1 &&
1031				    ticks < tp->t_badrxtwin) {
1032					++tcpstat.tcps_sndrexmitbad;
1033					tp->snd_cwnd = tp->snd_cwnd_prev;
1034					tp->snd_ssthresh =
1035					    tp->snd_ssthresh_prev;
1036					tp->snd_recover = tp->snd_recover_prev;
1037					if (tp->t_flags & TF_WASFRECOVERY)
1038					    ENTER_FASTRECOVERY(tp);
1039					tp->snd_nxt = tp->snd_max;
1040					tp->t_badrxtwin = 0;
1041				}
1042
1043				/*
1044				 * Recalculate the transmit timer / rtt.
1045				 *
1046				 * Some boxes send broken timestamp replies
1047				 * during the SYN+ACK phase, ignore
1048				 * timestamps of 0 or we could calculate a
1049				 * huge RTT and blow up the retransmit timer.
1050				 */
1051				if ((to.to_flags & TOF_TS) != 0 &&
1052				    to.to_tsecr) {
1053					if (!tp->t_rttlow ||
1054					    tp->t_rttlow > ticks - to.to_tsecr)
1055						tp->t_rttlow = ticks - to.to_tsecr;
1056					tcp_xmit_timer(tp,
1057					    ticks - to.to_tsecr + 1);
1058				} else if (tp->t_rtttime &&
1059				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
1060					if (!tp->t_rttlow ||
1061					    tp->t_rttlow > ticks - tp->t_rtttime)
1062						tp->t_rttlow = ticks - tp->t_rtttime;
1063					tcp_xmit_timer(tp,
1064							ticks - tp->t_rtttime);
1065				}
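				/*
				 * Update the bandwidth-delay product
				 * (inflight) estimate with this ACK.
				 */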
1066				tcp_xmit_bandwidth_limit(tp, th->th_ack);
1067				acked = th->th_ack - tp->snd_una;
1068				tcpstat.tcps_rcvackpack++;
1069				tcpstat.tcps_rcvackbyte += acked;
1070				sbdrop(&so->so_snd, acked);
1071				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1072				    SEQ_LEQ(th->th_ack, tp->snd_recover))
1073					tp->snd_recover = th->th_ack - 1;
1074				tp->snd_una = th->th_ack;
1075				/*
1076				 * Pull snd_wl2 up to prevent seq wrap relative
1077				 * to th_ack.
1078				 */
1079				tp->snd_wl2 = th->th_ack;
1080				tp->t_dupacks = 0;
1081				m_freem(m);
1082				ND6_HINT(tp); /* Some progress has been made. */
1083
1084				/*
1085				 * If all outstanding data are acked, stop
1086				 * retransmit timer, otherwise restart timer
1087				 * using current (possibly backed-off) value.
1088				 * If process is waiting for space,
1089				 * wakeup/selwakeup/signal.  If data
1090				 * are ready to send, let tcp_output
1091				 * decide between more output or persist.
1092				 */
1093#ifdef TCPDEBUG
1094				if (so->so_options & SO_DEBUG)
1095					tcp_trace(TA_INPUT, ostate, tp,
1096					    (void *)tcp_saveipgen,
1097					    &tcp_savetcp, 0);
1098#endif
1099				if (tp->snd_una == tp->snd_max)
1100					tcp_timer_activate(tp, TT_REXMT, 0);
1101				else if (!tcp_timer_active(tp, TT_PERSIST))
1102					tcp_timer_activate(tp, TT_REXMT,
1103						      tp->t_rxtcur);
1104				sowwakeup(so);
1105				if (so->so_snd.sb_cc)
1106					(void) tcp_output(tp);
1107				goto check_delack;
1108			}
1109		} else if (th->th_ack == tp->snd_una &&
1110		    tlen <= sbspace(&so->so_rcv)) {
1111			int newsize = 0;	/* automatic sockbuf scaling */
1112
1113			KASSERT(headlocked, ("%s: headlocked", __func__));
1114			INP_INFO_WUNLOCK(&tcbinfo);
1115			headlocked = 0;
1116			/*
1117			 * This is a pure, in-sequence data packet
1118			 * with nothing on the reassembly queue and
1119			 * we have enough buffer space to take it.
1120			 */
1121			/* Clean receiver SACK report if present */
1122			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1123				tcp_clean_sackreport(tp);
1124			++tcpstat.tcps_preddat;
1125			tp->rcv_nxt += tlen;
1126			/*
1127			 * Pull snd_wl1 up to prevent seq wrap relative to
1128			 * th_seq.
1129			 */
1130			tp->snd_wl1 = th->th_seq;
1131			/*
1132			 * Pull rcv_up up to prevent seq wrap relative to
1133			 * rcv_nxt.
1134			 */
1135			tp->rcv_up = tp->rcv_nxt;
1136			tcpstat.tcps_rcvpack++;
1137			tcpstat.tcps_rcvbyte += tlen;
1138			ND6_HINT(tp);	/* Some progress has been made */
1139#ifdef TCPDEBUG
1140			if (so->so_options & SO_DEBUG)
1141				tcp_trace(TA_INPUT, ostate, tp,
1142				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
1143#endif
1144		/*
1145		 * Automatic sizing of receive socket buffer.  Often the send
1146		 * buffer size is not optimally adjusted to the actual network
1147		 * conditions at hand (delay bandwidth product).  Setting the
1148		 * buffer size too small limits throughput on links with high
1149		 * bandwidth and high delay (e.g. transcontinental/oceanic links).
1150		 *
1151		 * On the receive side the socket buffer memory is only rarely
1152		 * used to any significant extent.  This allows us to be much
1153		 * more aggressive in scaling the receive socket buffer.  For
1154		 * the case that the buffer space is actually used to a large
1155		 * extent and we run out of kernel memory, we can simply drop
1156		 * the new segments; TCP on the sender will just retransmit them
1157		 * later.  Setting the buffer size too big may only consume too
1158		 * much kernel memory if the application doesn't read() from
1159		 * the socket or packet loss or reordering makes use of the
1160		 * reassembly queue.
1161		 *
1162		 * The criteria to step up the receive buffer one notch are:
1163		 *  1. the number of bytes received during the time it takes
1164		 *     one timestamp to be reflected back to us (the RTT);
1165		 *  2. received bytes per RTT are within seven eighths of the
1166		 *     current socket buffer size;
1167		 *  3. receive buffer size has not hit maximal automatic size;
1168		 *
1169		 * This algorithm does one step per RTT at most and only if
1170		 * we receive a bulk stream w/o packet losses or reorderings.
1171		 * Shrinking the buffer during idle times is not necessary as
1172		 * it doesn't consume any memory when idle.
1173		 *
1174		 * TODO: Only step up if the application is actually serving
1175		 * the buffer to better manage the socket buffer resources.
1176		 */
1177			if (tcp_do_autorcvbuf &&
1178			    to.to_tsecr &&
1179			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1180				if (to.to_tsecr > tp->rfbuf_ts &&
1181				    to.to_tsecr - tp->rfbuf_ts < hz) {
1182					if (tp->rfbuf_cnt >
1183					    (so->so_rcv.sb_hiwat / 8 * 7) &&
1184					    so->so_rcv.sb_hiwat <
1185					    tcp_autorcvbuf_max) {
1186						newsize =
1187						    min(so->so_rcv.sb_hiwat +
1188						    tcp_autorcvbuf_inc,
1189						    tcp_autorcvbuf_max);
1190					}
1191					/* Start over with next RTT. */
1192					tp->rfbuf_ts = 0;
1193					tp->rfbuf_cnt = 0;
1194				} else
1195					tp->rfbuf_cnt += tlen;	/* add up */
1196			}
1197
1198			/* Add data to socket buffer. */
1199			SOCKBUF_LOCK(&so->so_rcv);
1200			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1201				m_freem(m);
1202			} else {
1203				/*
1204				 * Set new socket buffer size.
1205				 * Give up when limit is reached.
1206				 */
1207				if (newsize)
1208					if (!sbreserve_locked(&so->so_rcv,
1209					    newsize, so, curthread))
1210						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1211				m_adj(m, drop_hdrlen);	/* delayed header drop */
1212				sbappendstream_locked(&so->so_rcv, m);
1213			}
1214			/* NB: sorwakeup_locked() does an implicit unlock. */
1215			sorwakeup_locked(so);
1216			if (DELAY_ACK(tp)) {
1217				tp->t_flags |= TF_DELACK;
1218			} else {
1219				tp->t_flags |= TF_ACKNOW;
1220				tcp_output(tp);
1221			}
1222			goto check_delack;
1223		}
1224	}
1225
1226	/*
1227	 * Calculate amount of space in receive window,
1228	 * and then do TCP input processing.
1229	 * Receive window is amount of space in rcv queue,
1230	 * but not less than advertised window.
1231	 */
1232	win = sbspace(&so->so_rcv);
1233	if (win < 0)
1234		win = 0;
1235	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1236
1237	/* Reset receive buffer auto scaling when not in bulk receive mode. */
1238	tp->rfbuf_ts = 0;
1239	tp->rfbuf_cnt = 0;
1240
1241	switch (tp->t_state) {
1242
1243	/*
1244	 * If the state is SYN_RECEIVED:
1245	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
1246	 */
1247	case TCPS_SYN_RECEIVED:
1248		if ((thflags & TH_ACK) &&
1249		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1250		     SEQ_GT(th->th_ack, tp->snd_max))) {
1251				rstreason = BANDLIM_RST_OPENPORT;
1252				goto dropwithreset;
1253		}
1254		break;
1255
1256	/*
1257	 * If the state is SYN_SENT:
1258	 *	if seg contains an ACK, but not for our SYN, drop the input.
1259	 *	if seg contains a RST, then drop the connection.
1260	 *	if seg does not contain SYN, then drop it.
1261	 * Otherwise this is an acceptable SYN segment
1262	 *	initialize tp->rcv_nxt and tp->irs
1263	 *	if seg contains ack then advance tp->snd_una
1264	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1265	 *	arrange for segment to be acked (eventually)
1266	 *	continue processing rest of data/controls, beginning with URG
1267	 */
1268	case TCPS_SYN_SENT:
1269		if ((thflags & TH_ACK) &&
1270		    (SEQ_LEQ(th->th_ack, tp->iss) ||
1271		     SEQ_GT(th->th_ack, tp->snd_max))) {
1272			rstreason = BANDLIM_UNLIMITED;
1273			goto dropwithreset;
1274		}
1275		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
1276			tp = tcp_drop(tp, ECONNREFUSED);
1277		if (thflags & TH_RST)
1278			goto drop;
1279		if (!(thflags & TH_SYN))
1280			goto drop;
1281
1282		tp->irs = th->th_seq;
1283		tcp_rcvseqinit(tp);
1284		if (thflags & TH_ACK) {
1285			tcpstat.tcps_connects++;
1286			soisconnected(so);
1287#ifdef MAC
1288			SOCK_LOCK(so);
1289			mac_set_socket_peer_from_mbuf(m, so);
1290			SOCK_UNLOCK(so);
1291#endif
1292			/* Do window scaling on this connection? */
1293			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1294				(TF_RCVD_SCALE|TF_REQ_SCALE)) {
1295				tp->rcv_scale = tp->request_r_scale;
1296			}
1297			tp->rcv_adv += tp->rcv_wnd;
1298			tp->snd_una++;		/* SYN is acked */
1299			/*
1300			 * If there's data, delay ACK; if there's also a FIN
1301			 * ACKNOW will be turned on later.
1302			 */
1303			if (DELAY_ACK(tp) && tlen != 0)
1304				tcp_timer_activate(tp, TT_DELACK,
1305				    tcp_delacktime);
1306			else
1307				tp->t_flags |= TF_ACKNOW;
1308			/*
1309			 * Received <SYN,ACK> in SYN_SENT[*] state.
1310			 * Transitions:
1311			 *	SYN_SENT  --> ESTABLISHED
1312			 *	SYN_SENT* --> FIN_WAIT_1
1313			 */
1314			tp->t_starttime = ticks;
1315			if (tp->t_flags & TF_NEEDFIN) {
1316				tp->t_state = TCPS_FIN_WAIT_1;
1317				tp->t_flags &= ~TF_NEEDFIN;
1318				thflags &= ~TH_SYN;
1319			} else {
1320				tp->t_state = TCPS_ESTABLISHED;
1321				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
1322			}
1323		} else {
1324			/*
1325			 * Received initial SYN in SYN-SENT[*] state =>
1326			 * simultaneous open.  If segment contains CC option
1327			 * and there is a cached CC, apply TAO test.
1328			 * If it succeeds, the connection is half-synchronized.
1329			 * Otherwise, do 3-way handshake:
1330			 *        SYN-SENT -> SYN-RECEIVED
1331			 *        SYN-SENT* -> SYN-RECEIVED*
1332			 * If there was no CC option, clear cached CC value.
1333			 */
1334			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
1335			tcp_timer_activate(tp, TT_REXMT, 0);
1336			tp->t_state = TCPS_SYN_RECEIVED;
1337		}
1338
1339		KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
1340		    __func__));
1341		INP_LOCK_ASSERT(tp->t_inpcb);
1342
1343		/*
1344		 * Advance th->th_seq to correspond to first data byte.
1345		 * If data, trim to stay within window,
1346		 * dropping FIN if necessary.
1347		 */
1348		th->th_seq++;
1349		if (tlen > tp->rcv_wnd) {
1350			todrop = tlen - tp->rcv_wnd;
1351			m_adj(m, -todrop);
1352			tlen = tp->rcv_wnd;
1353			thflags &= ~TH_FIN;
1354			tcpstat.tcps_rcvpackafterwin++;
1355			tcpstat.tcps_rcvbyteafterwin += todrop;
1356		}
1357		tp->snd_wl1 = th->th_seq - 1;
1358		tp->rcv_up = th->th_seq;
1359		/*
1360		 * Client side of transaction: already sent SYN and data.
1361		 * If the remote host used T/TCP to validate the SYN,
1362		 * our data will be ACK'd; if so, enter normal data segment
1363		 * processing in the middle of step 5, ack processing.
1364		 * Otherwise, goto step 6.
1365		 */
1366		if (thflags & TH_ACK)
1367			goto process_ACK;
1368
1369		goto step6;
1370
1371	/*
1372	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1373	 *      do normal processing.
1374	 *
1375	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
1376	 */
1377	case TCPS_LAST_ACK:
1378	case TCPS_CLOSING:
1379		break;  /* continue normal processing */
1380	}
1381
1382	/*
1383	 * States other than LISTEN or SYN_SENT.
1384	 * First check the RST flag and sequence number since reset segments
1385	 * are exempt from the timestamp and connection count tests.  This
1386	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1387	 * below which allowed reset segments in half the sequence space
1388	 * to fall through and be processed (which gives forged reset
1389	 * segments with a random sequence number a 50 percent chance of
1390	 * killing a connection).
1391	 * Then check timestamp, if present.
1392	 * Then check the connection count, if present.
1393	 * Then check that at least some bytes of segment are within
1394	 * receive window.  If segment begins before rcv_nxt,
1395	 * drop leading data (and SYN); if nothing left, just ack.
1396	 *
1397	 *
1398	 * If the RST bit is set, check the sequence number to see
1399	 * if this is a valid reset segment.
1400	 * RFC 793 page 37:
1401	 *   In all states except SYN-SENT, all reset (RST) segments
1402	 *   are validated by checking their SEQ-fields.  A reset is
1403	 *   valid if its sequence number is in the window.
1404	 * Note: this does not take into account delayed ACKs, so
1405	 *   we should test against last_ack_sent instead of rcv_nxt.
1406	 *   The sequence number in the reset segment is normally an
1407	 *   echo of our outgoing acknowledgement numbers, but some hosts
1408	 *   send a reset with the sequence number at the rightmost edge
1409	 *   of our receive window, and we have to handle this case.
1410	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
1411	 *   that brute force RST attacks are possible.  To combat this,
1412	 *   we use a much stricter check while in the ESTABLISHED state,
1413	 *   only accepting RSTs where the sequence number is equal to
1414	 *   last_ack_sent.  In all other states (the states in which a
1415	 *   RST is more likely), the more permissive check is used.
1416	 * If we have multiple segments in flight, the initial reset
1417	 * segment sequence numbers will be to the left of last_ack_sent,
1418	 * but they will eventually catch up.
1419	 * In any case, it never made sense to trim reset segments to
1420	 * fit the receive window since RFC 1122 says:
1421	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
1422	 *
1423	 *    A TCP SHOULD allow a received RST segment to include data.
1424	 *
1425	 *    DISCUSSION
1426	 *         It has been suggested that a RST segment could contain
1427	 *         ASCII text that encoded and explained the cause of the
1428	 *         RST.  No standard has yet been established for such
1429	 *         data.
1430	 *
1431	 * If the reset segment passes the sequence number test examine
1432	 * the state:
1433	 *    SYN_RECEIVED STATE:
1434	 *	If passive open, return to LISTEN state.
1435	 *	If active open, inform user that connection was refused.
1436	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
1437	 *	Inform user that connection was reset, and close tcb.
1438	 *    CLOSING, LAST_ACK STATES:
1439	 *	Close the tcb.
1440	 *    TIME_WAIT STATE:
1441	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
1442	 *      RFC 1337.
1443	 */
1444	if (thflags & TH_RST) {
1445		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
1446		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1447			switch (tp->t_state) {
1448
1449			case TCPS_SYN_RECEIVED:
1450				so->so_error = ECONNREFUSED;
1451				goto close;
1452
1453			case TCPS_ESTABLISHED:
1454				if (tcp_insecure_rst == 0 &&
1455				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
1456				    SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
1457				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
1458				    SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
1459					tcpstat.tcps_badrst++;
1460					goto drop;
1461				}
1462				/* FALLTHROUGH */
1463			case TCPS_FIN_WAIT_1:
1464			case TCPS_FIN_WAIT_2:
1465			case TCPS_CLOSE_WAIT:
1466				so->so_error = ECONNRESET;
1467			close:
1468				tp->t_state = TCPS_CLOSED;
1469				tcpstat.tcps_drops++;
1470				KASSERT(headlocked, ("%s: trimthenstep6: "
1471				    "tcp_close: head not locked", __func__));
1472				tp = tcp_close(tp);
1473				break;
1474
1475			case TCPS_CLOSING:
1476			case TCPS_LAST_ACK:
1477				KASSERT(headlocked, ("%s: trimthenstep6: "
1478				    "tcp_close.2: head not locked", __func__));
1479				tp = tcp_close(tp);
1480				break;
1481			}
1482		}
1483		goto drop;
1484	}
1485
1486	/*
1487	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1488	 * and it's less than ts_recent, drop it.
1489	 */
1490	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
1491	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
1492
1493		/* Check to see if ts_recent is over 24 days old.  */
1494		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1495			/*
1496			 * Invalidate ts_recent.  If this segment updates
1497			 * ts_recent, the age will be reset later and ts_recent
1498			 * will get a valid value.  If it does not, setting
1499			 * ts_recent to zero will at least satisfy the
1500			 * requirement that zero be placed in the timestamp
1501			 * echo reply when ts_recent isn't valid.  The
1502			 * age isn't reset until we get a valid ts_recent
1503			 * because we don't want out-of-order segments to be
1504			 * dropped when ts_recent is old.
1505			 */
1506			tp->ts_recent = 0;
1507		} else {
1508			tcpstat.tcps_rcvduppack++;
1509			tcpstat.tcps_rcvdupbyte += tlen;
1510			tcpstat.tcps_pawsdrop++;
1511			if (tlen)
1512				goto dropafterack;
1513			goto drop;
1514		}
1515	}
1516
1517	/*
1518	 * In the SYN-RECEIVED state, validate that the packet belongs to
1519	 * this connection before trimming the data to fit the receive
1520	 * window.  Check the sequence number versus IRS since we know
1521	 * the sequence numbers haven't wrapped.  This is a partial fix
1522	 * for the "LAND" DoS attack.
1523	 */
1524	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
1525		rstreason = BANDLIM_RST_OPENPORT;
1526		goto dropwithreset;
1527	}
1528
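	/*
	 * todrop is the number of leading bytes in this segment that we
	 * have already received (i.e. data before rcv_nxt), if positive.
	 */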
1529	todrop = tp->rcv_nxt - th->th_seq;
1530	if (todrop > 0) {
1531		if (thflags & TH_SYN) {
1532			thflags &= ~TH_SYN;
1533			th->th_seq++;
1534			if (th->th_urp > 1)
1535				th->th_urp--;
1536			else
1537				thflags &= ~TH_URG;
1538			todrop--;
1539		}
1540		/*
1541		 * Following if statement from Stevens, vol. 2, p. 960.
1542		 */
1543		if (todrop > tlen
1544		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
1545			/*
1546			 * Any valid FIN must be to the left of the window.
1547			 * At this point the FIN must be a duplicate or out
1548			 * of sequence; drop it.
1549			 */
1550			thflags &= ~TH_FIN;
1551
1552			/*
1553			 * Send an ACK to resynchronize and drop any data.
1554			 * But keep on processing for RST or ACK.
1555			 */
1556			tp->t_flags |= TF_ACKNOW;
1557			todrop = tlen;
1558			tcpstat.tcps_rcvduppack++;
1559			tcpstat.tcps_rcvdupbyte += todrop;
1560		} else {
1561			tcpstat.tcps_rcvpartduppack++;
1562			tcpstat.tcps_rcvpartdupbyte += todrop;
1563		}
1564		drop_hdrlen += todrop;	/* drop from the top afterwards */
1565		th->th_seq += todrop;
1566		tlen -= todrop;
1567		if (th->th_urp > todrop)
1568			th->th_urp -= todrop;
1569		else {
1570			thflags &= ~TH_URG;
1571			th->th_urp = 0;
1572		}
1573	}
1574
1575	/*
1576	 * If new data are received on a connection after the
1577	 * user processes are gone, then RST the other end.
1578	 */
1579	if ((so->so_state & SS_NOFDREF) &&
1580	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
1581		KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head "
1582		    "not locked", __func__));
1583		tp = tcp_close(tp);
1584		tcpstat.tcps_rcvafterclose++;
1585		rstreason = BANDLIM_UNLIMITED;
1586		goto dropwithreset;
1587	}
1588
1589	/*
1590	 * If segment ends after window, drop trailing data
1591	 * (and PUSH and FIN); if nothing left, just ACK.
1592	 */
1593	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
1594	if (todrop > 0) {
1595		tcpstat.tcps_rcvpackafterwin++;
1596		if (todrop >= tlen) {
1597			tcpstat.tcps_rcvbyteafterwin += tlen;
1598			/*
1599			 * If window is closed can only take segments at
1600			 * window edge, and have to drop data and PUSH from
1601			 * incoming segments.  Continue processing, but
1602			 * remember to ack.  Otherwise, drop segment
1603			 * and ack.
1604			 */
1605			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1606				tp->t_flags |= TF_ACKNOW;
1607				tcpstat.tcps_rcvwinprobe++;
1608			} else
1609				goto dropafterack;
1610		} else
1611			tcpstat.tcps_rcvbyteafterwin += todrop;
1612		m_adj(m, -todrop);
1613		tlen -= todrop;
1614		thflags &= ~(TH_PUSH|TH_FIN);
1615	}
1616
1617	/*
1618	 * If last ACK falls within this segment's sequence numbers,
1619	 * record its timestamp.
1620	 * NOTE:
1621	 * 1) That the test incorporates suggestions from the latest
1622	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
1623	 * 2) That updating only on newer timestamps interferes with
1624	 *    our earlier PAWS tests, so this check should be solely
1625	 *    predicated on the sequence space of this segment.
1626	 * 3) That we modify the segment boundary check to be
1627	 *        Last.ACK.Sent <= SEG.SEQ + SEG.Len
1628	 *    instead of RFC1323's
1629	 *        Last.ACK.Sent < SEG.SEQ + SEG.Len,
1630	 *    This modified check allows us to overcome RFC1323's
1631	 *    limitations as described in Stevens TCP/IP Illustrated
1632	 *    Vol. 2 p.869. In such cases, we can still calculate the
1633	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
1634	 */
1635	if ((to.to_flags & TOF_TS) != 0 &&
1636	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
1637	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
1638		((thflags & (TH_SYN|TH_FIN)) != 0))) {
1639		tp->ts_recent_age = ticks;
1640		tp->ts_recent = to.to_tsval;
1641	}
1642
1643	/*
1644	 * If a SYN is in the window, then this is an
1645	 * error and we send an RST and drop the connection.
1646	 */
1647	if (thflags & TH_SYN) {
1648		KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: "
1649		    "head not locked", __func__));
1650		tp = tcp_drop(tp, ECONNRESET);
1651		rstreason = BANDLIM_UNLIMITED;
1652		goto drop;
1653	}
1654
1655	/*
1656	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
1657	 * flag is on (half-synchronized state), then queue data for
1658	 * later processing; else drop segment and return.
1659	 */
1660	if ((thflags & TH_ACK) == 0) {
1661		if (tp->t_state == TCPS_SYN_RECEIVED ||
1662		    (tp->t_flags & TF_NEEDSYN))
1663			goto step6;
1664		else if (tp->t_flags & TF_ACKNOW)
1665			goto dropafterack;
1666		else
1667			goto drop;
1668	}
1669
1670	/*
1671	 * Ack processing.
1672	 */
1673	switch (tp->t_state) {
1674
1675	/*
1676	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
1677	 * ESTABLISHED state and continue processing.
1678	 * The ACK was checked above.
1679	 */
1680	case TCPS_SYN_RECEIVED:
1681
1682		tcpstat.tcps_connects++;
1683		soisconnected(so);
1684		/* Do window scaling? */
1685		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1686			(TF_RCVD_SCALE|TF_REQ_SCALE)) {
1687			tp->rcv_scale = tp->request_r_scale;
1688			tp->snd_wnd = tiwin;
1689		}
1690		/*
1691		 * Make transitions:
1692		 *      SYN-RECEIVED  -> ESTABLISHED
1693		 *      SYN-RECEIVED* -> FIN-WAIT-1
1694		 */
1695		tp->t_starttime = ticks;
1696		if (tp->t_flags & TF_NEEDFIN) {
1697			tp->t_state = TCPS_FIN_WAIT_1;
1698			tp->t_flags &= ~TF_NEEDFIN;
1699		} else {
1700			tp->t_state = TCPS_ESTABLISHED;
1701			tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
1702		}
1703		/*
1704		 * If segment contains data or FIN, will call tcp_reass()
1705		 * later; if not, do so now to pass queued data to user.
1706		 */
1707		if (tlen == 0 && (thflags & TH_FIN) == 0)
1708			(void) tcp_reass(tp, (struct tcphdr *)0, 0,
1709			    (struct mbuf *)0);
1710		tp->snd_wl1 = th->th_seq - 1;
1711		/* FALLTHROUGH */
1712
1713	/*
1714	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1715	 * ACKs.  If the ack is in the range
1716	 *	tp->snd_una < th->th_ack <= tp->snd_max
1717	 * then advance tp->snd_una to th->th_ack and drop
1718	 * data from the retransmission queue.  If this ACK reflects
1719	 * more up to date window information we update our window information.
1720	 */
1721	case TCPS_ESTABLISHED:
1722	case TCPS_FIN_WAIT_1:
1723	case TCPS_FIN_WAIT_2:
1724	case TCPS_CLOSE_WAIT:
1725	case TCPS_CLOSING:
1726	case TCPS_LAST_ACK:
1727		if (SEQ_GT(th->th_ack, tp->snd_max)) {
1728			tcpstat.tcps_rcvacktoomuch++;
1729			goto dropafterack;
1730		}
1731		if ((tp->t_flags & TF_SACK_PERMIT) &&
1732		    ((to.to_flags & TOF_SACK) ||
1733		     !TAILQ_EMPTY(&tp->snd_holes)))
1734			tcp_sack_doack(tp, &to, th->th_ack);
1735		if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1736			if (tlen == 0 && tiwin == tp->snd_wnd) {
1737				tcpstat.tcps_rcvdupack++;
1738				/*
1739				 * If we have outstanding data (other than
1740				 * a window probe), this is a completely
1741				 * duplicate ack (ie, window info didn't
1742				 * change), the ack is the biggest we've
1743				 * seen and we've seen exactly our rexmt
1744				 * threshold of them, assume a packet
1745				 * has been dropped and retransmit it.
1746				 * Kludge snd_nxt & the congestion
1747				 * window so we send only this one
1748				 * packet.
1749				 *
1750				 * We know we're losing at the current
1751				 * window size so do congestion avoidance
1752				 * (set ssthresh to half the current window
1753				 * and pull our congestion window back to
1754				 * the new ssthresh).
1755				 *
1756				 * Dup acks mean that packets have left the
1757				 * network (they're now cached at the receiver)
1758				 * so bump cwnd by the amount in the receiver
1759				 * to keep a constant cwnd packets in the
1760				 * network.
1761				 */
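				/*
				 * Illustrative figures for the non-SACK fast
				 * retransmit path below: with t_maxseg = 1460
				 * and min(snd_wnd, snd_cwnd) = 20 * 1460 when
				 * the third duplicate ACK arrives,
				 * snd_ssthresh becomes 10 * 1460, one segment
				 * is retransmitted with snd_cwnd temporarily
				 * set to 1460, snd_cwnd is then set to
				 * snd_ssthresh plus t_maxseg for each
				 * duplicate ACK not already covered by
				 * limited transmit (snd_limited), and each
				 * further duplicate ACK inflates snd_cwnd by
				 * another 1460 bytes.
				 */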
1762				if (!tcp_timer_active(tp, TT_REXMT) ||
1763				    th->th_ack != tp->snd_una)
1764					tp->t_dupacks = 0;
1765				else if (++tp->t_dupacks > tcprexmtthresh ||
1766				    ((tcp_do_newreno ||
1767				      (tp->t_flags & TF_SACK_PERMIT)) &&
1768				     IN_FASTRECOVERY(tp))) {
1769					if ((tp->t_flags & TF_SACK_PERMIT) &&
1770					    IN_FASTRECOVERY(tp)) {
1771						int awnd;
1772
1773						/*
1774						 * Compute the amount of data in flight first.
1775						 * We can inject new data into the pipe iff
1776						 * we have less than 1/2 the original window's
1777						 * worth of data in flight.
1778						 */
1779						awnd = (tp->snd_nxt - tp->snd_fack) +
1780							tp->sackhint.sack_bytes_rexmit;
1781						if (awnd < tp->snd_ssthresh) {
1782							tp->snd_cwnd += tp->t_maxseg;
1783							if (tp->snd_cwnd > tp->snd_ssthresh)
1784								tp->snd_cwnd = tp->snd_ssthresh;
1785						}
1786					} else
1787						tp->snd_cwnd += tp->t_maxseg;
1788					(void) tcp_output(tp);
1789					goto drop;
1790				} else if (tp->t_dupacks == tcprexmtthresh) {
1791					tcp_seq onxt = tp->snd_nxt;
1792					u_int win;
1793
1794					/*
1795					 * If we're doing sack, check to
1796					 * see if we're already in sack
1797					 * recovery. If we're not doing sack,
1798					 * check to see if we're in newreno
1799					 * recovery.
1800					 */
1801					if (tp->t_flags & TF_SACK_PERMIT) {
1802						if (IN_FASTRECOVERY(tp)) {
1803							tp->t_dupacks = 0;
1804							break;
1805						}
1806					} else if (tcp_do_newreno) {
1807						if (SEQ_LEQ(th->th_ack,
1808						    tp->snd_recover)) {
1809							tp->t_dupacks = 0;
1810							break;
1811						}
1812					}
1813					win = min(tp->snd_wnd, tp->snd_cwnd) /
1814					    2 / tp->t_maxseg;
1815					if (win < 2)
1816						win = 2;
1817					tp->snd_ssthresh = win * tp->t_maxseg;
1818					ENTER_FASTRECOVERY(tp);
1819					tp->snd_recover = tp->snd_max;
1820					tcp_timer_activate(tp, TT_REXMT, 0);
1821					tp->t_rtttime = 0;
1822					if (tp->t_flags & TF_SACK_PERMIT) {
1823						tcpstat.tcps_sack_recovery_episode++;
1824						tp->sack_newdata = tp->snd_nxt;
1825						tp->snd_cwnd = tp->t_maxseg;
1826						(void) tcp_output(tp);
1827						goto drop;
1828					}
1829					tp->snd_nxt = th->th_ack;
1830					tp->snd_cwnd = tp->t_maxseg;
1831					(void) tcp_output(tp);
1832					KASSERT(tp->snd_limited <= 2,
1833					    ("%s: tp->snd_limited too big",
1834					    __func__));
1835					tp->snd_cwnd = tp->snd_ssthresh +
1836					     tp->t_maxseg *
1837					     (tp->t_dupacks - tp->snd_limited);
1838					if (SEQ_GT(onxt, tp->snd_nxt))
1839						tp->snd_nxt = onxt;
1840					goto drop;
1841				} else if (tcp_do_rfc3042) {
1842					u_long oldcwnd = tp->snd_cwnd;
1843					tcp_seq oldsndmax = tp->snd_max;
1844					u_int sent;
1845
1846					KASSERT(tp->t_dupacks == 1 ||
1847					    tp->t_dupacks == 2,
1848					    ("%s: dupacks not 1 or 2",
1849					    __func__));
1850					if (tp->t_dupacks == 1)
1851						tp->snd_limited = 0;
1852					tp->snd_cwnd =
1853					    (tp->snd_nxt - tp->snd_una) +
1854					    (tp->t_dupacks - tp->snd_limited) *
1855					    tp->t_maxseg;
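					/*
					 * This implements RFC 3042 limited
					 * transmit: the temporary window
					 * allows (t_dupacks - snd_limited)
					 * previously unsent segments, at most
					 * two in total, to be sent beyond the
					 * data already in flight.
					 */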
1856					(void) tcp_output(tp);
1857					sent = tp->snd_max - oldsndmax;
1858					if (sent > tp->t_maxseg) {
1859						KASSERT((tp->t_dupacks == 2 &&
1860						    tp->snd_limited == 0) ||
1861						   (sent == tp->t_maxseg + 1 &&
1862						    tp->t_flags & TF_SENTFIN),
1863						    ("%s: sent too much",
1864						    __func__));
1865						tp->snd_limited = 2;
1866					} else if (sent > 0)
1867						++tp->snd_limited;
1868					tp->snd_cwnd = oldcwnd;
1869					goto drop;
1870				}
1871			} else
1872				tp->t_dupacks = 0;
1873			break;
1874		}
1875
1876		KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
1877		    ("%s: th_ack <= snd_una", __func__));
1878
1879		/*
1880		 * If the congestion window was inflated to account
1881		 * for the other side's cached packets, retract it.
1882		 */
1883		if (tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) {
1884			if (IN_FASTRECOVERY(tp)) {
1885				if (SEQ_LT(th->th_ack, tp->snd_recover)) {
1886					if (tp->t_flags & TF_SACK_PERMIT)
1887						tcp_sack_partialack(tp, th);
1888					else
1889						tcp_newreno_partial_ack(tp, th);
1890				} else {
1891					/*
1892					 * Out of fast recovery.
1893					 * Window inflation should have left us
1894					 * with approximately snd_ssthresh
1895					 * outstanding data.
1896					 * But in case we would be inclined to
1897					 * send a burst, better to do it via
1898					 * the slow start mechanism.
1899					 */
1900					if (SEQ_GT(th->th_ack +
1901							tp->snd_ssthresh,
1902						   tp->snd_max))
1903						tp->snd_cwnd = tp->snd_max -
1904								th->th_ack +
1905								tp->t_maxseg;
1906					else
1907						tp->snd_cwnd = tp->snd_ssthresh;
1908				}
1909			}
1910		} else {
1911			if (tp->t_dupacks >= tcprexmtthresh &&
1912			    tp->snd_cwnd > tp->snd_ssthresh)
1913				tp->snd_cwnd = tp->snd_ssthresh;
1914		}
1915		tp->t_dupacks = 0;
1916		/*
1917		 * If we reach this point, ACK is not a duplicate,
1918		 *     i.e., it ACKs something we sent.
1919		 */
1920		if (tp->t_flags & TF_NEEDSYN) {
1921			/*
1922			 * T/TCP: Connection was half-synchronized, and our
1923			 * SYN has been ACK'd (so connection is now fully
1924			 * synchronized).  Go to non-starred state,
1925			 * increment snd_una for ACK of SYN, and check if
1926			 * we can do window scaling.
1927			 */
1928			tp->t_flags &= ~TF_NEEDSYN;
1929			tp->snd_una++;
1930			/* Do window scaling? */
1931			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1932				(TF_RCVD_SCALE|TF_REQ_SCALE)) {
1933				tp->rcv_scale = tp->request_r_scale;
1934				/* Send window already scaled. */
1935			}
1936		}
1937
1938process_ACK:
1939		KASSERT(headlocked, ("%s: process_ACK: head not locked",
1940		    __func__));
1941		INP_LOCK_ASSERT(tp->t_inpcb);
1942
1943		acked = th->th_ack - tp->snd_una;
1944		tcpstat.tcps_rcvackpack++;
1945		tcpstat.tcps_rcvackbyte += acked;
1946
1947		/*
1948		 * If we just performed our first retransmit, and the ACK
1949		 * arrives within our recovery window, then it was a mistake
1950		 * to do the retransmit in the first place.  Recover our
1951		 * original cwnd and ssthresh, and proceed to transmit where
1952		 * we left off.
1953		 */
1954		if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
1955			++tcpstat.tcps_sndrexmitbad;
1956			tp->snd_cwnd = tp->snd_cwnd_prev;
1957			tp->snd_ssthresh = tp->snd_ssthresh_prev;
1958			tp->snd_recover = tp->snd_recover_prev;
1959			if (tp->t_flags & TF_WASFRECOVERY)
1960				ENTER_FASTRECOVERY(tp);
1961			tp->snd_nxt = tp->snd_max;
1962			tp->t_badrxtwin = 0;	/* XXX probably not required */
1963		}
1964
1965		/*
1966		 * If we have a timestamp reply, update smoothed
1967		 * round trip time.  If no timestamp is present but
1968		 * transmit timer is running and timed sequence
1969		 * number was acked, update smoothed round trip time.
1970		 * Since we now have an rtt measurement, cancel the
1971		 * timer backoff (cf., Phil Karn's retransmit alg.).
1972		 * Recompute the initial retransmit timer.
1973		 *
1974		 * Some boxes send broken timestamp replies
1975		 * during the SYN+ACK phase, ignore
1976		 * timestamps of 0 or we could calculate a
1977		 * huge RTT and blow up the retransmit timer.
1978		 */
1979		if ((to.to_flags & TOF_TS) != 0 &&
1980		    to.to_tsecr) {
1981			if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
1982				tp->t_rttlow = ticks - to.to_tsecr;
1983			tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
1984		} else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
1985			if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
1986				tp->t_rttlow = ticks - tp->t_rtttime;
1987			tcp_xmit_timer(tp, ticks - tp->t_rtttime);
1988		}
1989		tcp_xmit_bandwidth_limit(tp, th->th_ack);
1990
1991		/*
1992		 * If all outstanding data is acked, stop retransmit
1993		 * timer and remember to restart (more output or persist).
1994		 * If there is more data to be acked, restart retransmit
1995		 * timer, using current (possibly backed-off) value.
1996		 */
1997		if (th->th_ack == tp->snd_max) {
1998			tcp_timer_activate(tp, TT_REXMT, 0);
1999			needoutput = 1;
2000		} else if (!tcp_timer_active(tp, TT_PERSIST))
2001			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2002
2003		/*
2004		 * If no data (only SYN) was ACK'd,
2005		 *    skip rest of ACK processing.
2006		 */
2007		if (acked == 0)
2008			goto step6;
2009
2010		/*
2011		 * When new data is acked, open the congestion window.
2012		 * If the window gives us less than ssthresh packets
2013		 * in flight, open exponentially (maxseg per packet).
2014		 * Otherwise open linearly: maxseg per window
2015		 * (maxseg^2 / cwnd per packet).
2016		 */
2017		if ((!tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) ||
2018		    !IN_FASTRECOVERY(tp)) {
2019			u_int cw = tp->snd_cwnd;
2020			u_int incr = tp->t_maxseg;
2021			if (cw > tp->snd_ssthresh)
2022				incr = incr * incr / cw;
2023			tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale);
2024		}
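		/*
		 * For example, with t_maxseg = 1460: below ssthresh every
		 * ACK of new data grows snd_cwnd by 1460 bytes (roughly
		 * doubling it per round trip), while above ssthresh with
		 * snd_cwnd = 14600 each such ACK adds only
		 * 1460 * 1460 / 14600 = 146 bytes, about one segment per
		 * window.
		 */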
2025		SOCKBUF_LOCK(&so->so_snd);
2026		if (acked > so->so_snd.sb_cc) {
2027			tp->snd_wnd -= so->so_snd.sb_cc;
2028			sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2029			ourfinisacked = 1;
2030		} else {
2031			sbdrop_locked(&so->so_snd, acked);
2032			tp->snd_wnd -= acked;
2033			ourfinisacked = 0;
2034		}
2035		/* NB: sowwakeup_locked() does an implicit unlock. */
2036		sowwakeup_locked(so);
2037		/* Detect una wraparound. */
2038		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
2039		    !IN_FASTRECOVERY(tp) &&
2040		    SEQ_GT(tp->snd_una, tp->snd_recover) &&
2041		    SEQ_LEQ(th->th_ack, tp->snd_recover))
2042			tp->snd_recover = th->th_ack - 1;
2043		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
2044		    IN_FASTRECOVERY(tp) &&
2045		    SEQ_GEQ(th->th_ack, tp->snd_recover))
2046			EXIT_FASTRECOVERY(tp);
2047		tp->snd_una = th->th_ack;
2048		if (tp->t_flags & TF_SACK_PERMIT) {
2049			if (SEQ_GT(tp->snd_una, tp->snd_recover))
2050				tp->snd_recover = tp->snd_una;
2051		}
2052		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2053			tp->snd_nxt = tp->snd_una;
2054
2055		switch (tp->t_state) {
2056
2057		/*
2058		 * In FIN_WAIT_1 STATE in addition to the processing
2059		 * for the ESTABLISHED state if our FIN is now acknowledged
2060		 * then enter FIN_WAIT_2.
2061		 */
2062		case TCPS_FIN_WAIT_1:
2063			if (ourfinisacked) {
2064				/*
2065				 * If we can't receive any more
2066				 * data, then closing user can proceed.
2067				 * Starting the timer is contrary to the
2068				 * specification, but if we don't get a FIN
2069				 * we'll hang forever.
2070				 *
2071				 * XXXjl:
2072				 * we should release the tp also, and use a
2073				 * compressed state.
2074				 */
2075				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2076					int timeout;
2077
2078					soisdisconnected(so);
2079					timeout = (tcp_fast_finwait2_recycle) ?
2080						tcp_finwait2_timeout : tcp_maxidle;
2081					tcp_timer_activate(tp, TT_2MSL, timeout);
2082				}
2083				tp->t_state = TCPS_FIN_WAIT_2;
2084			}
2085			break;
2086
2087		/*
2088		 * In CLOSING STATE in addition to the processing for
2089		 * the ESTABLISHED state if the ACK acknowledges our FIN
2090		 * then enter the TIME-WAIT state, otherwise ignore
2091		 * the segment.
2092		 */
2093		case TCPS_CLOSING:
2094			if (ourfinisacked) {
2095				KASSERT(headlocked, ("%s: process_ACK: "
2096				    "head not locked", __func__));
2097				tcp_twstart(tp);
2098				INP_INFO_WUNLOCK(&tcbinfo);
2099				headlocked = 0;
2100				m_freem(m);
2101				return;
2102			}
2103			break;
2104
2105		/*
2106		 * In LAST_ACK, we may still be waiting for data to drain
2107		 * and/or to be acked, as well as for the ack of our FIN.
2108		 * If our FIN is now acknowledged, delete the TCB,
2109		 * enter the closed state and return.
2110		 */
2111		case TCPS_LAST_ACK:
2112			if (ourfinisacked) {
2113				KASSERT(headlocked, ("%s: process_ACK: "
2114				    "tcp_close: head not locked", __func__));
2115				tp = tcp_close(tp);
2116				goto drop;
2117			}
2118			break;
2119		}
2120	}
2121
2122step6:
2123	KASSERT(headlocked, ("%s: step6: head not locked", __func__));
2124	INP_LOCK_ASSERT(tp->t_inpcb);
2125
2126	/*
2127	 * Update window information.
2128	 * Don't look at window if no ACK: TACs send garbage on first SYN.
2129	 */
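	/*
	 * snd_wl1 and snd_wl2 hold the SEG.SEQ and SEG.ACK of the segment
	 * that last updated the send window, so only segments that are newer
	 * in sequence or acknowledgment space (or that advertise a larger
	 * window from the same point) may change snd_wnd.
	 */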
2130	if ((thflags & TH_ACK) &&
2131	    (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2132	    (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2133	     (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2134		/* keep track of pure window updates */
2135		if (tlen == 0 &&
2136		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2137			tcpstat.tcps_rcvwinupd++;
2138		tp->snd_wnd = tiwin;
2139		tp->snd_wl1 = th->th_seq;
2140		tp->snd_wl2 = th->th_ack;
2141		if (tp->snd_wnd > tp->max_sndwnd)
2142			tp->max_sndwnd = tp->snd_wnd;
2143		needoutput = 1;
2144	}
2145
2146	/*
2147	 * Process segments with URG.
2148	 */
2149	if ((thflags & TH_URG) && th->th_urp &&
2150	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2151		/*
2152		 * This is a kludge, but if we receive and accept
2153		 * random urgent pointers, we'll crash in
2154		 * soreceive.  It's hard to imagine someone
2155		 * actually wanting to send this much urgent data.
2156		 */
2157		SOCKBUF_LOCK(&so->so_rcv);
2158		if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2159			th->th_urp = 0;			/* XXX */
2160			thflags &= ~TH_URG;		/* XXX */
2161			SOCKBUF_UNLOCK(&so->so_rcv);	/* XXX */
2162			goto dodata;			/* XXX */
2163		}
2164		/*
2165		 * If this segment advances the known urgent pointer,
2166		 * then mark the data stream.  This should not happen
2167		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2168		 * a FIN has been received from the remote side.
2169		 * In these states we ignore the URG.
2170		 *
2171		 * According to RFC961 (Assigned Protocols),
2172		 * the urgent pointer points to the last octet
2173		 * of urgent data.  We continue, however,
2174		 * to consider it to indicate the first octet
2175		 * of data past the urgent section as the original
2176		 * spec states (in one of two places).
2177		 */
2178		if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2179			tp->rcv_up = th->th_seq + th->th_urp;
2180			so->so_oobmark = so->so_rcv.sb_cc +
2181			    (tp->rcv_up - tp->rcv_nxt) - 1;
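			/*
			 * so_oobmark counts the bytes of ordinary data that
			 * precede the out-of-band byte in the receive stream;
			 * when it is zero the socket is at the mark and
			 * SBS_RCVATMARK is set below.
			 */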
2182			if (so->so_oobmark == 0)
2183				so->so_rcv.sb_state |= SBS_RCVATMARK;
2184			sohasoutofband(so);
2185			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2186		}
2187		SOCKBUF_UNLOCK(&so->so_rcv);
2188		/*
2189		 * Remove out of band data so it doesn't get presented to the user.
2190		 * This can happen independent of advancing the URG pointer,
2191		 * but if two URG's are pending at once, some out-of-band
2192		 * data may creep in... ick.
2193		 */
2194		if (th->th_urp <= (u_long)tlen &&
2195		    !(so->so_options & SO_OOBINLINE)) {
2196			/* hdr drop is delayed */
2197			tcp_pulloutofband(so, th, m, drop_hdrlen);
2198		}
2199	} else {
2200		/*
2201		 * If no out of band data is expected,
2202		 * pull receive urgent pointer along
2203		 * with the receive window.
2204		 */
2205		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2206			tp->rcv_up = tp->rcv_nxt;
2207	}
2208dodata:							/* XXX */
2209	KASSERT(headlocked, ("%s: dodata: head not locked", __func__));
2210	INP_LOCK_ASSERT(tp->t_inpcb);
2211
2212	/*
2213	 * Process the segment text, merging it into the TCP sequencing queue,
2214	 * and arranging for acknowledgment of receipt if necessary.
2215	 * This process logically involves adjusting tp->rcv_wnd as data
2216	 * is presented to the user (this happens in tcp_usrreq.c,
2217	 * case PRU_RCVD).  If a FIN has already been received on this
2218	 * connection then we just ignore the text.
2219	 */
2220	if ((tlen || (thflags & TH_FIN)) &&
2221	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2222		tcp_seq save_start = th->th_seq;
2223		m_adj(m, drop_hdrlen);	/* delayed header drop */
2224		/*
2225		 * Insert segment which includes th into TCP reassembly queue
2226		 * with control block tp.  Set thflags to whether reassembly now
2227		 * includes a segment with FIN.  This handles the common case
2228		 * inline (segment is the next to be received on an established
2229		 * connection, and the queue is empty), avoiding linkage into
2230		 * and removal from the queue and repetition of various
2231		 * conversions.
2232		 * Set DELACK for segments received in order, but ack
2233		 * immediately when segments are out of order (so
2234		 * fast retransmit can work).
2235		 */
2236		if (th->th_seq == tp->rcv_nxt &&
2237		    LIST_EMPTY(&tp->t_segq) &&
2238		    TCPS_HAVEESTABLISHED(tp->t_state)) {
2239			if (DELAY_ACK(tp))
2240				tp->t_flags |= TF_DELACK;
2241			else
2242				tp->t_flags |= TF_ACKNOW;
2243			tp->rcv_nxt += tlen;
2244			thflags = th->th_flags & TH_FIN;
2245			tcpstat.tcps_rcvpack++;
2246			tcpstat.tcps_rcvbyte += tlen;
2247			ND6_HINT(tp);
2248			SOCKBUF_LOCK(&so->so_rcv);
2249			if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2250				m_freem(m);
2251			else
2252				sbappendstream_locked(&so->so_rcv, m);
2253			/* NB: sorwakeup_locked() does an implicit unlock. */
2254			sorwakeup_locked(so);
2255		} else {
2256			/*
2257			 * XXX: Due to the header drop above, "th" is
2258			 * theoretically invalid by now.  Fortunately
2259			 * m_adj() doesn't actually free any mbufs
2260			 * when trimming from the head.
2261			 */
2262			thflags = tcp_reass(tp, th, &tlen, m);
2263			tp->t_flags |= TF_ACKNOW;
2264		}
2265		if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2266			tcp_update_sack_list(tp, save_start, save_start + tlen);
2267#if 0
2268		/*
2269		 * Note the amount of data that peer has sent into
2270		 * our window, in order to estimate the sender's
2271		 * buffer size.
2272		 * XXX: Unused.
2273		 */
2274		len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2275#endif
2276	} else {
2277		m_freem(m);
2278		thflags &= ~TH_FIN;
2279	}
2280
2281	/*
2282	 * If FIN is received ACK the FIN and let the user know
2283	 * that the connection is closing.
2284	 */
2285	if (thflags & TH_FIN) {
2286		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2287			socantrcvmore(so);
2288			/*
2289			 * If connection is half-synchronized
2290			 * (ie NEEDSYN flag on) then delay ACK,
2291			 * so it may be piggybacked when SYN is sent.
2292			 * Otherwise, since we received a FIN, no
2293			 * more input can be expected, so send ACK now.
2294			 */
2295			if (tp->t_flags & TF_NEEDSYN)
2296				tp->t_flags |= TF_DELACK;
2297			else
2298				tp->t_flags |= TF_ACKNOW;
2299			tp->rcv_nxt++;
2300		}
2301		switch (tp->t_state) {
2302
2303		/*
2304		 * In SYN_RECEIVED and ESTABLISHED STATES
2305		 * enter the CLOSE_WAIT state.
2306		 */
2307		case TCPS_SYN_RECEIVED:
2308			tp->t_starttime = ticks;
2309			/* FALLTHROUGH */
2310		case TCPS_ESTABLISHED:
2311			tp->t_state = TCPS_CLOSE_WAIT;
2312			break;
2313
2314		/*
2315		 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2316		 * enter the CLOSING state.
2317		 */
2318		case TCPS_FIN_WAIT_1:
2319			tp->t_state = TCPS_CLOSING;
2320			break;
2321
2322		/*
2323		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2324		 * starting the time-wait timer, turning off the other
2325		 * standard timers.
2326		 */
2327		case TCPS_FIN_WAIT_2:
2328			KASSERT(headlocked == 1, ("%s: dodata: "
2329			    "TCP_FIN_WAIT_2: head not locked", __func__));
2330			tcp_twstart(tp);
2331			INP_INFO_WUNLOCK(&tcbinfo);
2332			return;
2333		}
2334	}
2335	INP_INFO_WUNLOCK(&tcbinfo);
2336	headlocked = 0;
2337#ifdef TCPDEBUG
2338	if (so->so_options & SO_DEBUG)
2339		tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
2340			  &tcp_savetcp, 0);
2341#endif
2342
2343	/*
2344	 * Return any desired output.
2345	 */
2346	if (needoutput || (tp->t_flags & TF_ACKNOW))
2347		(void) tcp_output(tp);
2348
2349check_delack:
2350	KASSERT(headlocked == 0, ("%s: check_delack: head locked",
2351	    __func__));
2352	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
2353	INP_LOCK_ASSERT(tp->t_inpcb);
2354	if (tp->t_flags & TF_DELACK) {
2355		tp->t_flags &= ~TF_DELACK;
2356		tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
2357	}
2358	INP_UNLOCK(tp->t_inpcb);
2359	return;
2360
2361dropafterack:
2362	KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__));
2363	/*
2364	 * Generate an ACK dropping incoming segment if it occupies
2365	 * sequence space, where the ACK reflects our state.
2366	 *
2367	 * We can now skip the test for the RST flag since all
2368	 * paths to this code happen after packets containing
2369	 * RST have been dropped.
2370	 *
2371	 * In the SYN-RECEIVED state, don't send an ACK unless the
2372	 * segment we received passes the SYN-RECEIVED ACK test.
2373	 * If it fails send a RST.  This breaks the loop in the
2374	 * "LAND" DoS attack, and also prevents an ACK storm
2375	 * between two listening ports that have been sent forged
2376	 * SYN segments, each with the source address of the other.
2377	 */
2378	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2379	    (SEQ_GT(tp->snd_una, th->th_ack) ||
2380	     SEQ_GT(th->th_ack, tp->snd_max)) ) {
2381		rstreason = BANDLIM_RST_OPENPORT;
2382		goto dropwithreset;
2383	}
2384#ifdef TCPDEBUG
2385	if (so->so_options & SO_DEBUG)
2386		tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2387			  &tcp_savetcp, 0);
2388#endif
2389	KASSERT(headlocked, ("%s: headlocked should be 1", __func__));
2390	INP_INFO_WUNLOCK(&tcbinfo);
2391	tp->t_flags |= TF_ACKNOW;
2392	(void) tcp_output(tp);
2393	INP_UNLOCK(tp->t_inpcb);
2394	m_freem(m);
2395	return;
2396
2397dropwithreset:
2398	KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__));
2399
2400	tcp_dropwithreset(m, th, tp, tlen, rstreason);
2401
2402	if (tp != NULL)
2403		INP_UNLOCK(tp->t_inpcb);
2404	if (headlocked)
2405		INP_INFO_WUNLOCK(&tcbinfo);
2406	return;
2407
2408drop:
2409	/*
2410	 * Drop space held by incoming segment and return.
2411	 */
2412#ifdef TCPDEBUG
2413	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2414		tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2415			  &tcp_savetcp, 0);
2416#endif
2417	if (tp != NULL)
2418		INP_UNLOCK(tp->t_inpcb);
2419	if (headlocked)
2420		INP_INFO_WUNLOCK(&tcbinfo);
2421	m_freem(m);
2422	return;
2423}
2424
2425/*
2426 * Issue RST and make ACK acceptable to originator of segment.
2427 * The mbuf must still include the original packet header.
2428 * tp may be NULL.
2429 */
2430static void
2431tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
2432    int tlen, int rstreason)
2433{
2434	struct ip *ip;
2435#ifdef INET6
2436	struct ip6_hdr *ip6;
2437#endif
2438	/* Don't bother if destination was broadcast/multicast. */
2439	if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
2440		goto drop;
2441#ifdef INET6
2442	if (mtod(m, struct ip *)->ip_v == 6) {
2443		ip6 = mtod(m, struct ip6_hdr *);
2444		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2445		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2446			goto drop;
2447		/* IPv6 anycast check is done at tcp6_input() */
2448	} else
2449#endif
2450	{
2451		ip = mtod(m, struct ip *);
2452		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2453		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2454		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2455		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2456			goto drop;
2457	}
2458
2459	/* Perform bandwidth limiting. */
2460	if (badport_bandlim(rstreason) < 0)
2461		goto drop;
2462
2463	/* tcp_respond consumes the mbuf chain. */
2464	if (th->th_flags & TH_ACK) {
2465		tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
2466		    th->th_ack, TH_RST);
2467	} else {
2468		if (th->th_flags & TH_SYN)
2469			tlen++;
2470		tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
2471		    (tcp_seq)0, TH_RST|TH_ACK);
2472	}
2473	return;
2474drop:
2475	m_freem(m);
2476	return;
2477}
2478
2479/*
2480 * Parse TCP options and place in tcpopt.
2481 */
2482static void
2483tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
2484{
2485	int opt, optlen;
2486
2487	to->to_flags = 0;
2488	for (; cnt > 0; cnt -= optlen, cp += optlen) {
2489		opt = cp[0];
2490		if (opt == TCPOPT_EOL)
2491			break;
2492		if (opt == TCPOPT_NOP)
2493			optlen = 1;
2494		else {
2495			if (cnt < 2)
2496				break;
2497			optlen = cp[1];
2498			if (optlen < 2 || optlen > cnt)
2499				break;
2500		}
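		/*
		 * Every option other than EOL and NOP is TLV encoded as
		 * kind, length, value, where length covers the kind and
		 * length octets; e.g. a timestamp option is 10 bytes:
		 * kind 8, length 10, 4-byte TSval, 4-byte TSecr.
		 */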
2501		switch (opt) {
2502		case TCPOPT_MAXSEG:
2503			if (optlen != TCPOLEN_MAXSEG)
2504				continue;
2505			if (!(flags & TO_SYN))
2506				continue;
2507			to->to_flags |= TOF_MSS;
2508			bcopy((char *)cp + 2,
2509			    (char *)&to->to_mss, sizeof(to->to_mss));
2510			to->to_mss = ntohs(to->to_mss);
2511			break;
2512		case TCPOPT_WINDOW:
2513			if (optlen != TCPOLEN_WINDOW)
2514				continue;
2515			if (!(flags & TO_SYN))
2516				continue;
2517			to->to_flags |= TOF_SCALE;
2518			to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
2519			break;
2520		case TCPOPT_TIMESTAMP:
2521			if (optlen != TCPOLEN_TIMESTAMP)
2522				continue;
2523			to->to_flags |= TOF_TS;
2524			bcopy((char *)cp + 2,
2525			    (char *)&to->to_tsval, sizeof(to->to_tsval));
2526			to->to_tsval = ntohl(to->to_tsval);
2527			bcopy((char *)cp + 6,
2528			    (char *)&to->to_tsecr, sizeof(to->to_tsecr));
2529			to->to_tsecr = ntohl(to->to_tsecr);
2530			break;
2531#ifdef TCP_SIGNATURE
2532		/*
2533		 * XXX In order to reply to a host which has set the
2534		 * TCP_SIGNATURE option in its initial SYN, we have to
2535		 * record the fact that the option was observed here
2536		 * for the syncache code to perform the correct response.
2537		 */
2538		case TCPOPT_SIGNATURE:
2539			if (optlen != TCPOLEN_SIGNATURE)
2540				continue;
2541			to->to_flags |= TOF_SIGNATURE;
2542			to->to_signature = cp + 2;
2543			break;
2544#endif
2545		case TCPOPT_SACK_PERMITTED:
2546			if (optlen != TCPOLEN_SACK_PERMITTED)
2547				continue;
2548			if (!(flags & TO_SYN))
2549				continue;
2550			if (!tcp_do_sack)
2551				continue;
2552			to->to_flags |= TOF_SACKPERM;
2553			break;
2554		case TCPOPT_SACK:
2555			if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
2556				continue;
2557			if (flags & TO_SYN)
2558				continue;
2559			to->to_flags |= TOF_SACK;
2560			to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
2561			to->to_sacks = cp + 2;
2562			tcpstat.tcps_sack_rcv_blocks++;
2563			break;
2564		default:
2565			continue;
2566		}
2567	}
2568}
2569
2570/*
2571 * Pull out of band byte out of a segment so
2572 * it doesn't appear in the user's data queue.
2573 * It is still reflected in the segment length for
2574 * sequencing purposes.
2575 */
2576static void
2577tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
2578    int off)
2579{
2580	int cnt = off + th->th_urp - 1;
2581
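	/*
	 * cnt is the offset of the out-of-band byte from the start of the
	 * mbuf chain: the headers being dropped (off) plus the urgent
	 * pointer, minus one because the urgent pointer is treated as
	 * pointing just past the urgent data.
	 */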
2582	while (cnt >= 0) {
2583		if (m->m_len > cnt) {
2584			char *cp = mtod(m, caddr_t) + cnt;
2585			struct tcpcb *tp = sototcpcb(so);
2586
2587			tp->t_iobc = *cp;
2588			tp->t_oobflags |= TCPOOB_HAVEDATA;
2589			bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
2590			m->m_len--;
2591			if (m->m_flags & M_PKTHDR)
2592				m->m_pkthdr.len--;
2593			return;
2594		}
2595		cnt -= m->m_len;
2596		m = m->m_next;
2597		if (m == NULL)
2598			break;
2599	}
2600	panic("tcp_pulloutofband");
2601}
2602
2603/*
2604 * Collect new round-trip time estimate
2605 * and update averages and current timeout.
2606 */
2607static void
2608tcp_xmit_timer(struct tcpcb *tp, int rtt)
2609{
2610	int delta;
2611
2612	INP_LOCK_ASSERT(tp->t_inpcb);
2613
2614	tcpstat.tcps_rttupdated++;
2615	tp->t_rttupdated++;
2616	if (tp->t_srtt != 0) {
2617		/*
2618		 * srtt is stored as fixed point with 5 bits after the
2619		 * binary point (i.e., scaled by 32).  The following magic
2620		 * is equivalent to the smoothing algorithm in rfc793 with
2621		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2622		 * point).  Adjust rtt to origin 0.
2623		 */
2624		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2625			- (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
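		/*
		 * With TCP_RTT_SHIFT = 5 and the stock TCP_DELTA_SHIFT of 2,
		 * this works out to srtt += 4 * (rtt - 1) - srtt / 8 in the
		 * scaled representation, i.e. roughly
		 * srtt_new = 7/8 * srtt_old + 1/8 * rtt in real time units.
		 */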
2626
2627		if ((tp->t_srtt += delta) <= 0)
2628			tp->t_srtt = 1;
2629
2630		/*
2631		 * We accumulate a smoothed rtt variance (actually, a
2632		 * smoothed mean difference), then set the retransmit
2633		 * timer to smoothed rtt + 4 times the smoothed variance.
2634		 * rttvar is stored as fixed point with 4 bits after the
2635		 * binary point (scaled by 16).  The following is
2636		 * equivalent to rfc793 smoothing with an alpha of .75
2637		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
2638		 * rfc793's wired-in beta.
2639		 */
2640		if (delta < 0)
2641			delta = -delta;
2642		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2643		if ((tp->t_rttvar += delta) <= 0)
2644			tp->t_rttvar = 1;
2645		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2646		    tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2647	} else {
2648		/*
2649		 * No rtt measurement yet - use the unsmoothed rtt.
2650		 * Set the variance to half the rtt (so our first
2651		 * retransmit happens at 3*rtt).
2652		 */
2653		tp->t_srtt = rtt << TCP_RTT_SHIFT;
2654		tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2655		tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2656	}
2657	tp->t_rtttime = 0;
2658	tp->t_rxtshift = 0;
2659
2660	/*
2661	 * The retransmit should happen at rtt + 4 * rttvar.
2662	 * Because of the way we do the smoothing, srtt and rttvar
2663	 * will each average +1/2 tick of bias.  When we compute
2664	 * the retransmit timer, we want 1/2 tick of rounding and
2665	 * 1 extra tick because of +-1/2 tick uncertainty in the
2666	 * firing of the timer.  The bias will give us exactly the
2667	 * 1.5 tick we need.  But, because the bias is
2668	 * statistical, we have to test that we don't drop below
2669	 * the minimum feasible timer (which is 2 ticks).
2670	 */
2671	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2672		      max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
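	/*
	 * For example, a smoothed rtt of 100 ms and an rttvar of 25 ms
	 * (expressed in ticks) give a retransmit timer of roughly
	 * 100 + 4 * 25 = 200 ms, clamped below by max(t_rttmin, rtt + 2)
	 * ticks and above by TCPTV_REXMTMAX.
	 */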
2673
2674	/*
2675	 * We received an ack for a packet that wasn't retransmitted;
2676	 * it is probably safe to discard any error indications we've
2677	 * received recently.  This isn't quite right, but close enough
2678	 * for now (a route might have failed after we sent a segment,
2679	 * and the return path might not be symmetrical).
2680	 */
2681	tp->t_softerror = 0;
2682}
2683
2684/*
2685 * Determine a reasonable value for maxseg size.
2686 * If the route is known, check route for mtu.
2687 * If none, use an mss that can be handled on the outgoing
2688 * interface without forcing IP to fragment; if bigger than
2689 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2690 * to utilize large mbufs.  If no route is found, route has no mtu,
2691 * or the destination isn't local, use a default, hopefully conservative
2692 * size (usually 512 or the default IP max size, but no more than the mtu
2693 * of the interface), as we can't discover anything about intervening
2694 * gateways or networks.  We also initialize the congestion/slow start
2695 * window to be a single segment if the destination isn't local.
2696 * While looking at the routing entry, we also initialize other path-dependent
2697 * parameters from pre-set or cached values in the routing entry.
2698 *
2699 * Also take into account the space needed for options that we
2700 * send regularly.  Make maxseg shorter by that amount to assure
2701 * that we can send maxseg amount of data even when the options
2702 * are present.  Store the upper limit of the length of options plus
2703 * data in maxopd.
2704 *
2705 * In case of T/TCP, we call this routine during implicit connection
2706 * setup as well (offer = -1), to initialize maxseg from the cached
2707 * MSS of our peer.
2708 *
2709 * NOTE that this routine is only called when we process an incoming
2710 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
2711 */
2712void
2713tcp_mss(struct tcpcb *tp, int offer)
2714{
2715	int rtt, mss;
2716	u_long bufsize;
2717	u_long maxmtu;
2718	struct inpcb *inp = tp->t_inpcb;
2719	struct socket *so;
2720	struct hc_metrics_lite metrics;
2721	int origoffer = offer;
2722	int mtuflags = 0;
2723#ifdef INET6
2724	int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
2725	size_t min_protoh = isipv6 ?
2726			    sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
2727			    sizeof (struct tcpiphdr);
2728#else
2729	const size_t min_protoh = sizeof(struct tcpiphdr);
2730#endif
2731
2732	/* Initialize. */
2733#ifdef INET6
2734	if (isipv6) {
2735		maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags);
2736		tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
2737	} else
2738#endif
2739	{
2740		maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags);
2741		tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
2742	}
2743	so = inp->inp_socket;
2744
2745	/*
2746	 * No route to sender, stay with default mss and return.
2747	 */
2748	if (maxmtu == 0)
2749		return;
2750
2751	/* What have we got? */
2752	switch (offer) {
2753		case 0:
2754			/*
2755			 * Offer == 0 means that there was no MSS on the SYN
2756			 * segment, in this case we use tcp_mssdflt.
2757			 */
2758			offer =
2759#ifdef INET6
2760				isipv6 ? tcp_v6mssdflt :
2761#endif
2762				tcp_mssdflt;
2763			break;
2764
2765		case -1:
2766			/*
2767			 * Offer == -1 means that we didn't receive SYN yet.
2768			 */
2769			/* FALLTHROUGH */
2770
2771		default:
2772			/*
2773			 * Prevent DoS attack with too small MSS. Round up
2774			 * to at least minmss.
2775			 */
2776			offer = max(offer, tcp_minmss);
2777			/*
2778			 * Sanity check: make sure that maxopd will be large
2779			 * enough to allow some data on segments even if
2780			 * all the option space is used (40 bytes).  Otherwise
2781			 * funny things may happen in tcp_output.
2782			 */
2783			offer = max(offer, 64);
2784	}
2785
2786	/*
2787	 * rmx information is now retrieved from tcp_hostcache.
2788	 */
2789	tcp_hc_get(&inp->inp_inc, &metrics);
2790
2791	/*
2792	 * If there's a discovered mtu in the tcp hostcache, use it;
2793	 * otherwise, use the link mtu.
2794	 */
2795	if (metrics.rmx_mtu)
2796		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2797	else {
2798#ifdef INET6
2799		if (isipv6) {
2800			mss = maxmtu - min_protoh;
2801			if (!path_mtu_discovery &&
2802			    !in6_localaddr(&inp->in6p_faddr))
2803				mss = min(mss, tcp_v6mssdflt);
2804		} else
2805#endif
2806		{
2807			mss = maxmtu - min_protoh;
2808			if (!path_mtu_discovery &&
2809			    !in_localaddr(inp->inp_faddr))
2810				mss = min(mss, tcp_mssdflt);
2811		}
2812	}
2813	mss = min(mss, offer);
2814
2815	/*
2816	 * maxopd stores the maximum length of data AND options
2817	 * in a segment; maxseg is the amount of data in a normal
2818	 * segment.  We need to store this value (maxopd) apart
2819	 * from maxseg, because now every segment carries options
2820	 * and thus we normally have somewhat less data in segments.
2821	 */
2822	tp->t_maxopd = mss;
2823
2824	/*
2825	 * origoffer==-1 indicates that no segments were received yet.
2826	 * In this case we just guess.
2827	 */
2828	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2829	    (origoffer == -1 ||
2830	     (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
2831		mss -= TCPOLEN_TSTAMP_APPA;
2832	tp->t_maxseg = mss;
2833
2834#if	(MCLBYTES & (MCLBYTES - 1)) == 0
2835	if (mss > MCLBYTES)
2836		mss &= ~(MCLBYTES-1);
2837#else
2838	if (mss > MCLBYTES)
2839		mss = mss / MCLBYTES * MCLBYTES;
2840#endif
2841	tp->t_maxseg = mss;
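	/*
	 * For example, on an IPv4 path with a 1500 byte MTU, min_protoh is
	 * 40, giving a maxopd of 1460 (assuming the peer's MSS offer does
	 * not reduce it further); if timestamps are in use, the
	 * TCPOLEN_TSTAMP_APPA (12 byte) deduction above leaves a maxseg
	 * of 1448.
	 */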
2842
2843	/*
2844	 * If there's a pipesize, change the socket buffer to that size;
2845	 * don't change it if sb_hiwat differs from the default (then it
2846	 * has been changed on purpose with setsockopt()).
2847	 * Make the socket buffers an integral number of mss units;
2848	 * if the mss is larger than the socket buffer, decrease the mss.
2849	 */
2850	SOCKBUF_LOCK(&so->so_snd);
2851	if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
2852		bufsize = metrics.rmx_sendpipe;
2853	else
2854		bufsize = so->so_snd.sb_hiwat;
2855	if (bufsize < mss)
2856		mss = bufsize;
2857	else {
2858		bufsize = roundup(bufsize, mss);
2859		if (bufsize > sb_max)
2860			bufsize = sb_max;
2861		if (bufsize > so->so_snd.sb_hiwat)
2862			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
2863	}
2864	SOCKBUF_UNLOCK(&so->so_snd);
2865	tp->t_maxseg = mss;
2866
2867	SOCKBUF_LOCK(&so->so_rcv);
2868	if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
2869		bufsize = metrics.rmx_recvpipe;
2870	else
2871		bufsize = so->so_rcv.sb_hiwat;
2872	if (bufsize > mss) {
2873		bufsize = roundup(bufsize, mss);
2874		if (bufsize > sb_max)
2875			bufsize = sb_max;
2876		if (bufsize > so->so_rcv.sb_hiwat)
2877			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
2878	}
2879	SOCKBUF_UNLOCK(&so->so_rcv);
2880	/*
2881	 * While we're here, check the others too.
2882	 */
2883	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
2884		tp->t_srtt = rtt;
2885		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
2886		tcpstat.tcps_usedrtt++;
2887		if (metrics.rmx_rttvar) {
2888			tp->t_rttvar = metrics.rmx_rttvar;
2889			tcpstat.tcps_usedrttvar++;
2890		} else {
2891			/* default variation is +- 1 rtt */
2892			tp->t_rttvar =
2893			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
2894		}
2895		TCPT_RANGESET(tp->t_rxtcur,
2896			      ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
2897			      tp->t_rttmin, TCPTV_REXMTMAX);
2898	}
2899	if (metrics.rmx_ssthresh) {
2900		/*
2901		 * There's some sort of gateway or interface
2902		 * buffer limit on the path.  Use this to set
2903		 * the slow start threshold, but set the
2904		 * threshold to no less than 2*mss.
2905		 */
2906		tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
2907		tcpstat.tcps_usedssthresh++;
2908	}
2909	if (metrics.rmx_bandwidth)
2910		tp->snd_bandwidth = metrics.rmx_bandwidth;
2911
2912	/*
2913	 * Set the slow-start flight size depending on whether this
2914	 * is a local network or not.
2915	 *
2916	 * Extend this so we cache the cwnd too and retrieve it here.
2917	 * Make cwnd even bigger than RFC3390 suggests but only if we
2918	 * have previous experience with the remote host. Be careful
2919	 * not to make cwnd bigger than the remote receive window or our own
2920	 * send socket buffer. Maybe put some additional upper bound
2921	 * on the retrieved cwnd. Should do incremental updates to
2922	 * hostcache when cwnd collapses so the next connection doesn't
2923	 * overload the path again.
2924	 *
2925	 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost.
2926	 * We currently check only in syncache_socket for that.
2927	 */
2928#define TCP_METRICS_CWND
2929#ifdef TCP_METRICS_CWND
2930	if (metrics.rmx_cwnd)
2931		tp->snd_cwnd = max(mss,
2932				min(metrics.rmx_cwnd / 2,
2933				 min(tp->snd_wnd, so->so_snd.sb_hiwat)));
2934	else
2935#endif
2936	if (tcp_do_rfc3390)
2937		tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
2938#ifdef INET6
2939	else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
2940		 (!isipv6 && in_localaddr(inp->inp_faddr)))
2941#else
2942	else if (in_localaddr(inp->inp_faddr))
2943#endif
2944		tp->snd_cwnd = mss * ss_fltsz_local;
2945	else
2946		tp->snd_cwnd = mss * ss_fltsz;
2947
2948	/* Check the interface for TSO capabilities. */
2949	if (mtuflags & CSUM_TSO)
2950		tp->t_flags |= TF_TSO;
2951}
2952
2953/*
2954 * Determine the MSS option to send on an outgoing SYN.
2955 */
2956int
2957tcp_mssopt(struct in_conninfo *inc)
2958{
2959	int mss = 0;
2960	u_long maxmtu = 0;
2961	u_long thcmtu = 0;
2962	size_t min_protoh;
2963#ifdef INET6
2964	int isipv6 = inc->inc_isipv6 ? 1 : 0;
2965#endif
2966
2967	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
2968
2969#ifdef INET6
2970	if (isipv6) {
2971		mss = tcp_v6mssdflt;
2972		maxmtu = tcp_maxmtu6(inc, NULL);
2973		thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
2974		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
2975	} else
2976#endif
2977	{
2978		mss = tcp_mssdflt;
2979		maxmtu = tcp_maxmtu(inc, NULL);
2980		thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
2981		min_protoh = sizeof(struct tcpiphdr);
2982	}
2983	if (maxmtu && thcmtu)
2984		mss = min(maxmtu, thcmtu) - min_protoh;
2985	else if (maxmtu || thcmtu)
2986		mss = max(maxmtu, thcmtu) - min_protoh;
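	/*
	 * E.g., with a 1500 byte interface MTU, no hostcache entry
	 * (thcmtu == 0) and IPv4 (min_protoh == 40), the advertised MSS
	 * is 1500 - 40 = 1460.
	 */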
2987
2988	return (mss);
2989}
2990
2991
2992/*
2993 * When a partial ack arrives, force the retransmission of the
2994 * next unacknowledged segment.  Do not clear tp->t_dupacks.
2995 * By setting snd_nxt to th_ack, this forces the retransmission timer to
2996 * be started again.
2997 */
2998static void
2999tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3000{
3001	tcp_seq onxt = tp->snd_nxt;
3002	u_long  ocwnd = tp->snd_cwnd;
3003
3004	tcp_timer_activate(tp, TT_REXMT, 0);
3005	tp->t_rtttime = 0;
3006	tp->snd_nxt = th->th_ack;
3007	/*
3008	 * Set snd_cwnd to one segment beyond acknowledged offset.
3009	 * (tp->snd_una has not yet been updated when this function is called.)
3010	 */
3011	tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
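	/*
	 * E.g., if the partial ACK acknowledged 3 * t_maxseg bytes, the
	 * temporary window of acked data plus one segment lets the
	 * tcp_output() call below retransmit exactly the next
	 * unacknowledged segment.
	 */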
3012	tp->t_flags |= TF_ACKNOW;
3013	(void) tcp_output(tp);
3014	tp->snd_cwnd = ocwnd;
3015	if (SEQ_GT(onxt, tp->snd_nxt))
3016		tp->snd_nxt = onxt;
3017	/*
3018	 * Partial window deflation.  Relies on fact that tp->snd_una
3019	 * not updated yet.
3020	 */
3021	if (tp->snd_cwnd > th->th_ack - tp->snd_una)
3022		tp->snd_cwnd -= th->th_ack - tp->snd_una;
3023	else
3024		tp->snd_cwnd = 0;
3025	tp->snd_cwnd += tp->t_maxseg;
3026}
3027