tcp_reass.c: diff between revisions 131079 and 131151
(unchanged lines shown once; deleted lines marked with -, added lines with +)
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
-30 * $FreeBSD: head/sys/netinet/tcp_reass.c 131079 2004-06-25 04:11:26Z ps $
+30 * $FreeBSD: head/sys/netinet/tcp_reass.c 131151 2004-06-26 19:10:39Z rwatson $
31 */
32
33#include "opt_ipfw.h" /* for ipfw_fwd */
34#include "opt_inet.h"
35#include "opt_inet6.h"
36#include "opt_ipsec.h"
37#include "opt_mac.h"
38#include "opt_tcpdebug.h"
39#include "opt_tcp_input.h"
40#include "opt_tcp_sack.h"
41
42#include <sys/param.h>
43#include <sys/kernel.h>
44#include <sys/mac.h>
45#include <sys/malloc.h>
46#include <sys/mbuf.h>
47#include <sys/proc.h> /* for proc0 declaration */
48#include <sys/protosw.h>
49#include <sys/signalvar.h>
50#include <sys/socket.h>
51#include <sys/socketvar.h>
52#include <sys/sysctl.h>
53#include <sys/syslog.h>
54#include <sys/systm.h>
55
56#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
57
58#include <vm/uma.h>
59
60#include <net/if.h>
61#include <net/route.h>
62
63#include <netinet/in.h>
64#include <netinet/in_pcb.h>
65#include <netinet/in_systm.h>
66#include <netinet/in_var.h>
67#include <netinet/ip.h>
68#include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */
69#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
70#include <netinet/ip_var.h>
71#include <netinet/ip6.h>
72#include <netinet/icmp6.h>
73#include <netinet6/in6_pcb.h>
74#include <netinet6/ip6_var.h>
75#include <netinet6/nd6.h>
76#include <netinet/tcp.h>
77#include <netinet/tcp_fsm.h>
78#include <netinet/tcp_seq.h>
79#include <netinet/tcp_timer.h>
80#include <netinet/tcp_var.h>
81#include <netinet6/tcp6_var.h>
82#include <netinet/tcpip.h>
83#ifdef TCPDEBUG
84#include <netinet/tcp_debug.h>
85#endif /* TCPDEBUG */
86
87#ifdef FAST_IPSEC
88#include <netipsec/ipsec.h>
89#include <netipsec/ipsec6.h>
90#endif /*FAST_IPSEC*/
91
92#ifdef IPSEC
93#include <netinet6/ipsec.h>
94#include <netinet6/ipsec6.h>
95#include <netkey/key.h>
96#endif /*IPSEC*/
97
98#include <machine/in_cksum.h>
99
100static const int tcprexmtthresh = 3;
101tcp_cc tcp_ccgen;
102
103struct tcpstat tcpstat;
104SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
105 &tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
106
107static int log_in_vain = 0;
108SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
109 &log_in_vain, 0, "Log all incoming TCP connections");
110
111static int blackhole = 0;
112SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
113 &blackhole, 0, "Do not send RST when dropping refused connections");
114
115int tcp_delack_enabled = 1;
116SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
117 &tcp_delack_enabled, 0,
118 "Delay ACK to try and piggyback it onto a data packet");
119
120#ifdef TCP_DROP_SYNFIN
121static int drop_synfin = 0;
122SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
123 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
124#endif
125
126static int tcp_do_rfc3042 = 1;
127SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
128 &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");
129
130static int tcp_do_rfc3390 = 1;
131SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
132 &tcp_do_rfc3390, 0,
133 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
134
135SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
136 "TCP Segment Reassembly Queue");
137
138static int tcp_reass_maxseg = 0;
139SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
140 &tcp_reass_maxseg, 0,
141 "Global maximum number of TCP Segments in Reassembly Queue");
142
143int tcp_reass_qsize = 0;
144SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
145 &tcp_reass_qsize, 0,
146 "Global number of TCP Segments currently in Reassembly Queue");
147
148static int tcp_reass_maxqlen = 48;
149SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxqlen, CTLFLAG_RW,
150 &tcp_reass_maxqlen, 0,
151 "Maximum number of TCP Segments per individual Reassembly Queue");
152
153static int tcp_reass_overflows = 0;
154SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
155 &tcp_reass_overflows, 0,
156 "Global number of TCP Segment Reassembly Queue Overflows");
157
158struct inpcbhead tcb;
159#define tcb6 tcb /* for KAME src sync over BSD*'s */
160struct inpcbinfo tcbinfo;
161struct mtx *tcbinfo_mtx;
162
163static void tcp_dooptions(struct tcpcb *, struct tcpopt *, u_char *,
164 int, int, struct tcphdr *);
165
166static void tcp_pulloutofband(struct socket *,
167 struct tcphdr *, struct mbuf *, int);
168static int tcp_reass(struct tcpcb *, struct tcphdr *, int *,
169 struct mbuf *);
170static void tcp_xmit_timer(struct tcpcb *, int);
171static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
172static int tcp_timewait(struct tcptw *, struct tcpopt *,
173 struct tcphdr *, struct mbuf *, int);
174
175/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
176#ifdef INET6
177#define ND6_HINT(tp) \
178do { \
179 if ((tp) && (tp)->t_inpcb && \
180 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
181 nd6_nud_hint(NULL, NULL, 0); \
182} while (0)
183#else
184#define ND6_HINT(tp)
185#endif
186
187/*
188 * Indicate whether this ack should be delayed. We can delay the ack if
189 * - there is no delayed ack timer in progress and
190 * - our last ack wasn't a 0-sized window. We never want to delay
191 * the ack that opens up a 0-sized window and
192 * - delayed acks are enabled or
193 * - this is a half-synchronized T/TCP connection.
194 */
195#define DELAY_ACK(tp) \
196 ((!callout_active(tp->tt_delack) && \
197 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
198 (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
199
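A minimal sketch of how DELAY_ACK is consumed later in this file (simplified from the in-sequence data fast path; not a verbatim excerpt):

	if (DELAY_ACK(tp))
		tp->t_flags |= TF_DELACK;	/* let the delack callout send the ACK */
	else {
		tp->t_flags |= TF_ACKNOW;	/* ACK immediately */
		tcp_output(tp);
	}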
200/* Initialize TCP reassembly queue */
201uma_zone_t tcp_reass_zone;
202void
203tcp_reass_init()
204{
205 tcp_reass_maxseg = nmbclusters / 16;
206 TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
207 &tcp_reass_maxseg);
208 tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
209 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
210 uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
211}
212
213static int
214tcp_reass(tp, th, tlenp, m)
215 register struct tcpcb *tp;
216 register struct tcphdr *th;
217 int *tlenp;
218 struct mbuf *m;
219{
220 struct tseg_qent *q;
221 struct tseg_qent *p = NULL;
222 struct tseg_qent *nq;
223 struct tseg_qent *te = NULL;
224 struct socket *so = tp->t_inpcb->inp_socket;
225 int flags;
226
227 /*
228 * XXX: tcp_reass() is rather inefficient with its data structures
229 * and should be rewritten (see NetBSD for optimizations). While
230 * doing that it should move to its own file tcp_reass.c.
231 */
232
233 /*
 234	 * Call with th==0 after becoming established to
235 * force pre-ESTABLISHED data up to user socket.
236 */
237 if (th == 0)
238 goto present;
239
240 /*
241 * Limit the number of segments in the reassembly queue to prevent
242 * holding on to too many segments (and thus running out of mbufs).
243 * Make sure to let the missing segment through which caused this
244 * queue. Always keep one global queue entry spare to be able to
245 * process the missing segment.
246 */
247 if (th->th_seq != tp->rcv_nxt &&
248 (tcp_reass_qsize + 1 >= tcp_reass_maxseg ||
249 tp->t_segqlen >= tcp_reass_maxqlen)) {
250 tcp_reass_overflows++;
251 tcpstat.tcps_rcvmemdrop++;
252 m_freem(m);
253 return (0);
254 }
255
256 /*
257 * Allocate a new queue entry. If we can't, or hit the zone limit
258 * just drop the pkt.
259 */
260 te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
261 if (te == NULL) {
262 tcpstat.tcps_rcvmemdrop++;
263 m_freem(m);
264 return (0);
265 }
266 tp->t_segqlen++;
267 tcp_reass_qsize++;
268
269 /*
270 * Find a segment which begins after this one does.
271 */
272 LIST_FOREACH(q, &tp->t_segq, tqe_q) {
273 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
274 break;
275 p = q;
276 }
277
278 /*
279 * If there is a preceding segment, it may provide some of
280 * our data already. If so, drop the data from the incoming
281 * segment. If it provides all of our data, drop us.
282 */
283 if (p != NULL) {
284 register int i;
285 /* conversion to int (in i) handles seq wraparound */
286 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
287 if (i > 0) {
288 if (i >= *tlenp) {
289 tcpstat.tcps_rcvduppack++;
290 tcpstat.tcps_rcvdupbyte += *tlenp;
291 m_freem(m);
292 uma_zfree(tcp_reass_zone, te);
293 tp->t_segqlen--;
294 tcp_reass_qsize--;
295 /*
296 * Try to present any queued data
297 * at the left window edge to the user.
298 * This is needed after the 3-WHS
299 * completes.
300 */
301 goto present; /* ??? */
302 }
303 m_adj(m, i);
304 *tlenp -= i;
305 th->th_seq += i;
306 }
307 }
308 tcpstat.tcps_rcvoopack++;
309 tcpstat.tcps_rcvoobyte += *tlenp;
310
311 /*
312 * While we overlap succeeding segments trim them or,
313 * if they are completely covered, dequeue them.
314 */
315 while (q) {
316 register int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
317 if (i <= 0)
318 break;
319 if (i < q->tqe_len) {
320 q->tqe_th->th_seq += i;
321 q->tqe_len -= i;
322 m_adj(q->tqe_m, i);
323 break;
324 }
325
326 nq = LIST_NEXT(q, tqe_q);
327 LIST_REMOVE(q, tqe_q);
328 m_freem(q->tqe_m);
329 uma_zfree(tcp_reass_zone, q);
330 tp->t_segqlen--;
331 tcp_reass_qsize--;
332 q = nq;
333 }
334
335 /* Insert the new segment queue entry into place. */
336 te->tqe_m = m;
337 te->tqe_th = th;
338 te->tqe_len = *tlenp;
339
340 if (p == NULL) {
341 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
342 } else {
343 LIST_INSERT_AFTER(p, te, tqe_q);
344 }
345
346present:
347 /*
348 * Present data to user, advancing rcv_nxt through
349 * completed sequence space.
350 */
351 if (!TCPS_HAVEESTABLISHED(tp->t_state))
352 return (0);
353 q = LIST_FIRST(&tp->t_segq);
354 if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
355 return (0);
+356	SOCKBUF_LOCK(&so->so_rcv);
 356	do {
 357		tp->rcv_nxt += q->tqe_len;
 358		flags = q->tqe_th->th_flags & TH_FIN;
 359		nq = LIST_NEXT(q, tqe_q);
 360		LIST_REMOVE(q, tqe_q);
+362		/* Unlocked read. */
 361		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
 362			m_freem(q->tqe_m);
 363		else
-364			sbappendstream(&so->so_rcv, q->tqe_m);
+366			sbappendstream_locked(&so->so_rcv, q->tqe_m);
 365		uma_zfree(tcp_reass_zone, q);
 366		tp->t_segqlen--;
 367		tcp_reass_qsize--;
 368		q = nq;
 369	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
 370	ND6_HINT(tp);
-371	sorwakeup(so);
+373	sorwakeup_locked(so);
 372	return (flags);
 373}
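The hunk above is the substance of this revision: appends to the receive socket buffer now run under the socket-buffer lock. A minimal sketch of the pattern, assuming the sockbuf API of this era, where sbappendstream_locked() and sorwakeup_locked() expect the lock taken by SOCKBUF_LOCK() to be held, and sorwakeup_locked() releases it on the way out:

	SOCKBUF_LOCK(&so->so_rcv);		/* serialize access to so_rcv */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
		m_freem(m);			/* reader has shut down: discard */
	else
		sbappendstream_locked(&so->so_rcv, m);	/* append with lock held */
	sorwakeup_locked(so);			/* wake the reader; drops the lock */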
374
375/*
376 * TCP input routine, follows pages 65-76 of the
377 * protocol specification dated September, 1981 very closely.
378 */
379#ifdef INET6
380int
381tcp6_input(mp, offp, proto)
382 struct mbuf **mp;
383 int *offp, proto;
384{
385 register struct mbuf *m = *mp;
386 struct in6_ifaddr *ia6;
387
388 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
389
390 /*
391 * draft-itojun-ipv6-tcp-to-anycast
392 * better place to put this in?
393 */
394 ia6 = ip6_getdstifaddr(m);
395 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
396 struct ip6_hdr *ip6;
397
398 ip6 = mtod(m, struct ip6_hdr *);
399 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
400 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
401 return IPPROTO_DONE;
402 }
403
404 tcp_input(m, *offp);
405 return IPPROTO_DONE;
406}
407#endif
408
409void
410tcp_input(m, off0)
411 register struct mbuf *m;
412 int off0;
413{
414 register struct tcphdr *th;
415 register struct ip *ip = NULL;
416 register struct ipovly *ipov;
417 register struct inpcb *inp = NULL;
418 u_char *optp = NULL;
419 int optlen = 0;
420 int len, tlen, off;
421 int drop_hdrlen;
422 register struct tcpcb *tp = 0;
423 register int thflags;
424 struct socket *so = 0;
425 int todrop, acked, ourfinisacked, needoutput = 0;
426 u_long tiwin;
427 struct tcpopt to; /* options in this segment */
428 struct rmxp_tao tao; /* our TAO cache entry */
429 int headlocked = 0;
430 struct sockaddr_in *next_hop = NULL;
431 int rstreason; /* For badport_bandlim accounting purposes */
432
433 struct ip6_hdr *ip6 = NULL;
434#ifdef INET6
435 int isipv6;
436#else
437 const int isipv6 = 0;
438#endif
439
440#ifdef TCPDEBUG
441 /*
442 * The size of tcp_saveipgen must be the size of the max ip header,
443 * now IPv6.
444 */
445 u_char tcp_saveipgen[40];
446 struct tcphdr tcp_savetcp;
447 short ostate = 0;
448#endif
449
450 /* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
451 next_hop = m_claim_next(m, PACKET_TAG_IPFORWARD);
452#ifdef INET6
453 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
454#endif
455 bzero(&tao, sizeof(tao));
456 bzero((char *)&to, sizeof(to));
457
458 tcpstat.tcps_rcvtotal++;
459
460 if (isipv6) {
461#ifdef INET6
462 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
463 ip6 = mtod(m, struct ip6_hdr *);
464 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
465 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
466 tcpstat.tcps_rcvbadsum++;
467 goto drop;
468 }
469 th = (struct tcphdr *)((caddr_t)ip6 + off0);
470
471 /*
472 * Be proactive about unspecified IPv6 address in source.
473 * As we use all-zero to indicate unbounded/unconnected pcb,
474 * unspecified IPv6 address can be used to confuse us.
475 *
 476	 * Note that packets with an unspecified IPv6 destination are
477 * already dropped in ip6_input.
478 */
479 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
480 /* XXX stat */
481 goto drop;
482 }
483#else
484 th = NULL; /* XXX: avoid compiler warning */
485#endif
486 } else {
487 /*
488 * Get IP and TCP header together in first mbuf.
489 * Note: IP leaves IP header in first mbuf.
490 */
491 if (off0 > sizeof (struct ip)) {
492 ip_stripoptions(m, (struct mbuf *)0);
493 off0 = sizeof(struct ip);
494 }
495 if (m->m_len < sizeof (struct tcpiphdr)) {
496 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) {
497 tcpstat.tcps_rcvshort++;
498 return;
499 }
500 }
501 ip = mtod(m, struct ip *);
502 ipov = (struct ipovly *)ip;
503 th = (struct tcphdr *)((caddr_t)ip + off0);
504 tlen = ip->ip_len;
505
506 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
507 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
508 th->th_sum = m->m_pkthdr.csum_data;
509 else
510 th->th_sum = in_pseudo(ip->ip_src.s_addr,
511 ip->ip_dst.s_addr,
512 htonl(m->m_pkthdr.csum_data +
513 ip->ip_len +
514 IPPROTO_TCP));
515 th->th_sum ^= 0xffff;
516#ifdef TCPDEBUG
517 ipov->ih_len = (u_short)tlen;
518 ipov->ih_len = htons(ipov->ih_len);
519#endif
520 } else {
521 /*
522 * Checksum extended TCP header and data.
523 */
524 len = sizeof (struct ip) + tlen;
525 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
526 ipov->ih_len = (u_short)tlen;
527 ipov->ih_len = htons(ipov->ih_len);
528 th->th_sum = in_cksum(m, len);
529 }
530 if (th->th_sum) {
531 tcpstat.tcps_rcvbadsum++;
532 goto drop;
533 }
534#ifdef INET6
535 /* Re-initialization for later version check */
536 ip->ip_v = IPVERSION;
537#endif
538 }
539
540 /*
541 * Check that TCP offset makes sense,
542 * pull out TCP options and adjust length. XXX
543 */
544 off = th->th_off << 2;
545 if (off < sizeof (struct tcphdr) || off > tlen) {
546 tcpstat.tcps_rcvbadoff++;
547 goto drop;
548 }
549 tlen -= off; /* tlen is used instead of ti->ti_len */
550 if (off > sizeof (struct tcphdr)) {
551 if (isipv6) {
552#ifdef INET6
553 IP6_EXTHDR_CHECK(m, off0, off, );
554 ip6 = mtod(m, struct ip6_hdr *);
555 th = (struct tcphdr *)((caddr_t)ip6 + off0);
556#endif
557 } else {
558 if (m->m_len < sizeof(struct ip) + off) {
559 if ((m = m_pullup(m, sizeof (struct ip) + off))
560 == 0) {
561 tcpstat.tcps_rcvshort++;
562 return;
563 }
564 ip = mtod(m, struct ip *);
565 ipov = (struct ipovly *)ip;
566 th = (struct tcphdr *)((caddr_t)ip + off0);
567 }
568 }
569 optlen = off - sizeof (struct tcphdr);
570 optp = (u_char *)(th + 1);
571 }
572 thflags = th->th_flags;
573
574#ifdef TCP_DROP_SYNFIN
575 /*
576 * If the drop_synfin option is enabled, drop all packets with
577 * both the SYN and FIN bits set. This prevents e.g. nmap from
578 * identifying the TCP/IP stack.
579 *
580 * This is a violation of the TCP specification.
581 */
582 if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN))
583 goto drop;
584#endif
585
586 /*
587 * Convert TCP protocol specific fields to host format.
588 */
589 th->th_seq = ntohl(th->th_seq);
590 th->th_ack = ntohl(th->th_ack);
591 th->th_win = ntohs(th->th_win);
592 th->th_urp = ntohs(th->th_urp);
593
594 /*
595 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
596 * until after ip6_savecontrol() is called and before other functions
597 * which don't want those proto headers.
598 * Because ip6_savecontrol() is going to parse the mbuf to
599 * search for data to be passed up to user-land, it wants mbuf
600 * parameters to be unchanged.
601 * XXX: the call of ip6_savecontrol() has been obsoleted based on
602 * latest version of the advanced API (20020110).
603 */
604 drop_hdrlen = off0 + off;
605
606 /*
607 * Locate pcb for segment.
608 */
609 INP_INFO_WLOCK(&tcbinfo);
610 headlocked = 1;
611findpcb:
612 /* IPFIREWALL_FORWARD section */
613 if (next_hop != NULL && isipv6 == 0) { /* IPv6 support is not yet */
614 /*
615 * Transparently forwarded. Pretend to be the destination.
616 * already got one like this?
617 */
618 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
619 ip->ip_dst, th->th_dport,
620 0, m->m_pkthdr.rcvif);
621 if (!inp) {
622 /* It's new. Try find the ambushing socket. */
623 inp = in_pcblookup_hash(&tcbinfo,
624 ip->ip_src, th->th_sport,
625 next_hop->sin_addr,
626 next_hop->sin_port ?
627 ntohs(next_hop->sin_port) :
628 th->th_dport,
629 1, m->m_pkthdr.rcvif);
630 }
631 } else {
632 if (isipv6) {
633#ifdef INET6
634 inp = in6_pcblookup_hash(&tcbinfo,
635 &ip6->ip6_src, th->th_sport,
636 &ip6->ip6_dst, th->th_dport,
637 1, m->m_pkthdr.rcvif);
638#endif
639 } else
640 inp = in_pcblookup_hash(&tcbinfo,
641 ip->ip_src, th->th_sport,
642 ip->ip_dst, th->th_dport,
643 1, m->m_pkthdr.rcvif);
644 }
645
646#if defined(IPSEC) || defined(FAST_IPSEC)
647#ifdef INET6
648 if (isipv6) {
649 if (inp != NULL && ipsec6_in_reject(m, inp)) {
650#ifdef IPSEC
651 ipsec6stat.in_polvio++;
652#endif
653 goto drop;
654 }
655 } else
656#endif /* INET6 */
657 if (inp != NULL && ipsec4_in_reject(m, inp)) {
658#ifdef IPSEC
659 ipsecstat.in_polvio++;
660#endif
661 goto drop;
662 }
663#endif /*IPSEC || FAST_IPSEC*/
664
665 /*
666 * If the state is CLOSED (i.e., TCB does not exist) then
667 * all data in the incoming segment is discarded.
668 * If the TCB exists but is in CLOSED state, it is embryonic,
669 * but should either do a listen or a connect soon.
670 */
671 if (inp == NULL) {
672 if (log_in_vain) {
673#ifdef INET6
674 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
675#else
676 char dbuf[4*sizeof "123"], sbuf[4*sizeof "123"];
677#endif
678
679 if (isipv6) {
680#ifdef INET6
681 strcpy(dbuf, "[");
682 strcpy(sbuf, "[");
683 strcat(dbuf, ip6_sprintf(&ip6->ip6_dst));
684 strcat(sbuf, ip6_sprintf(&ip6->ip6_src));
685 strcat(dbuf, "]");
686 strcat(sbuf, "]");
687#endif
688 } else {
689 strcpy(dbuf, inet_ntoa(ip->ip_dst));
690 strcpy(sbuf, inet_ntoa(ip->ip_src));
691 }
692 switch (log_in_vain) {
693 case 1:
694 if ((thflags & TH_SYN) == 0)
695 break;
696 /* FALLTHROUGH */
697 case 2:
698 log(LOG_INFO,
699 "Connection attempt to TCP %s:%d "
700 "from %s:%d flags:0x%02x\n",
701 dbuf, ntohs(th->th_dport), sbuf,
702 ntohs(th->th_sport), thflags);
703 break;
704 default:
705 break;
706 }
707 }
708 if (blackhole) {
709 switch (blackhole) {
710 case 1:
711 if (thflags & TH_SYN)
712 goto drop;
713 break;
714 case 2:
715 goto drop;
716 default:
717 goto drop;
718 }
719 }
720 rstreason = BANDLIM_RST_CLOSEDPORT;
721 goto dropwithreset;
722 }
723 INP_LOCK(inp);
724 if (inp->inp_vflag & INP_TIMEWAIT) {
725 /*
726 * The only option of relevance is TOF_CC, and only if
727 * present in a SYN segment. See tcp_timewait().
728 */
729 if (thflags & TH_SYN)
730 tcp_dooptions((struct tcpcb *)NULL, &to, optp, optlen, 1, th);
731 if (tcp_timewait((struct tcptw *)inp->inp_ppcb,
732 &to, th, m, tlen))
733 goto findpcb;
734 /*
735 * tcp_timewait unlocks inp.
736 */
737 INP_INFO_WUNLOCK(&tcbinfo);
738 return;
739 }
740 tp = intotcpcb(inp);
741 if (tp == 0) {
742 INP_UNLOCK(inp);
743 rstreason = BANDLIM_RST_CLOSEDPORT;
744 goto dropwithreset;
745 }
746 if (tp->t_state == TCPS_CLOSED)
747 goto drop;
748
749 /* Unscale the window into a 32-bit value. */
750 if ((thflags & TH_SYN) == 0)
751 tiwin = th->th_win << tp->snd_scale;
752 else
753 tiwin = th->th_win;
754
755#ifdef MAC
756 INP_LOCK_ASSERT(inp);
757 if (mac_check_inpcb_deliver(inp, m))
758 goto drop;
759#endif
760 so = inp->inp_socket;
761#ifdef TCPDEBUG
762 if (so->so_options & SO_DEBUG) {
763 ostate = tp->t_state;
764 if (isipv6)
765 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
766 else
767 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
768 tcp_savetcp = *th;
769 }
770#endif
771 if (so->so_options & SO_ACCEPTCONN) {
772 struct in_conninfo inc;
773
774#ifdef INET6
775 inc.inc_isipv6 = isipv6;
776#endif
777 if (isipv6) {
778 inc.inc6_faddr = ip6->ip6_src;
779 inc.inc6_laddr = ip6->ip6_dst;
780 } else {
781 inc.inc_faddr = ip->ip_src;
782 inc.inc_laddr = ip->ip_dst;
783 }
784 inc.inc_fport = th->th_sport;
785 inc.inc_lport = th->th_dport;
786
787 /*
788 * If the state is LISTEN then ignore segment if it contains
789 * a RST. If the segment contains an ACK then it is bad and
790 * send a RST. If it does not contain a SYN then it is not
791 * interesting; drop it.
792 *
793 * If the state is SYN_RECEIVED (syncache) and seg contains
794 * an ACK, but not for our SYN/ACK, send a RST. If the seg
795 * contains a RST, check the sequence number to see if it
796 * is a valid reset segment.
797 */
798 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
799 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
800 if (!syncache_expand(&inc, th, &so, m)) {
801 /*
802 * No syncache entry, or ACK was not
803 * for our SYN/ACK. Send a RST.
804 */
805 tcpstat.tcps_badsyn++;
806 rstreason = BANDLIM_RST_OPENPORT;
807 goto dropwithreset;
808 }
809 if (so == NULL) {
810 /*
811 * Could not complete 3-way handshake,
812 * connection is being closed down, and
813 * syncache will free mbuf.
814 */
815 INP_UNLOCK(inp);
816 INP_INFO_WUNLOCK(&tcbinfo);
817 return;
818 }
819 /*
820 * Socket is created in state SYN_RECEIVED.
821 * Continue processing segment.
822 */
823 INP_UNLOCK(inp);
824 inp = sotoinpcb(so);
825 INP_LOCK(inp);
826 tp = intotcpcb(inp);
827 /*
828 * This is what would have happened in
829 * tcp_output() when the SYN,ACK was sent.
830 */
831 tp->snd_up = tp->snd_una;
832 tp->snd_max = tp->snd_nxt = tp->iss + 1;
833 tp->last_ack_sent = tp->rcv_nxt;
834 /*
835 * RFC1323: The window in SYN & SYN/ACK
836 * segments is never scaled.
837 */
838 tp->snd_wnd = tiwin; /* unscaled */
839 goto after_listen;
840 }
841 if (thflags & TH_RST) {
842 syncache_chkrst(&inc, th);
843 goto drop;
844 }
845 if (thflags & TH_ACK) {
846 syncache_badack(&inc);
847 tcpstat.tcps_badsyn++;
848 rstreason = BANDLIM_RST_OPENPORT;
849 goto dropwithreset;
850 }
851 goto drop;
852 }
853
854 /*
855 * Segment's flags are (SYN) or (SYN|FIN).
856 */
857#ifdef INET6
858 /*
859 * If deprecated address is forbidden,
860 * we do not accept SYN to deprecated interface
861 * address to prevent any new inbound connection from
862 * getting established.
863 * When we do not accept SYN, we send a TCP RST,
864 * with deprecated source address (instead of dropping
865 * it). We compromise it as it is much better for peer
866 * to send a RST, and RST will be the final packet
867 * for the exchange.
868 *
869 * If we do not forbid deprecated addresses, we accept
870 * the SYN packet. RFC2462 does not suggest dropping
871 * SYN in this case.
872 * If we decipher RFC2462 5.5.4, it says like this:
873 * 1. use of deprecated addr with existing
874 * communication is okay - "SHOULD continue to be
875 * used"
876 * 2. use of it with new communication:
877 * (2a) "SHOULD NOT be used if alternate address
878 * with sufficient scope is available"
879 * (2b) nothing mentioned otherwise.
880 * Here we fall into (2b) case as we have no choice in
881 * our source address selection - we must obey the peer.
882 *
883 * The wording in RFC2462 is confusing, and there are
884 * multiple description text for deprecated address
885 * handling - worse, they are not exactly the same.
886 * I believe 5.5.4 is the best one, so we follow 5.5.4.
887 */
888 if (isipv6 && !ip6_use_deprecated) {
889 struct in6_ifaddr *ia6;
890
891 if ((ia6 = ip6_getdstifaddr(m)) &&
892 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
893 INP_UNLOCK(inp);
894 tp = NULL;
895 rstreason = BANDLIM_RST_OPENPORT;
896 goto dropwithreset;
897 }
898 }
899#endif
900 /*
901 * If it is from this socket, drop it, it must be forged.
902 * Don't bother responding if the destination was a broadcast.
903 */
904 if (th->th_dport == th->th_sport) {
905 if (isipv6) {
906 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
907 &ip6->ip6_src))
908 goto drop;
909 } else {
910 if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
911 goto drop;
912 }
913 }
914 /*
915 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
916 *
917 * Note that it is quite possible to receive unicast
918 * link-layer packets with a broadcast IP address. Use
919 * in_broadcast() to find them.
920 */
921 if (m->m_flags & (M_BCAST|M_MCAST))
922 goto drop;
923 if (isipv6) {
924 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
925 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
926 goto drop;
927 } else {
928 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
929 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
930 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
931 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
932 goto drop;
933 }
934 /*
935 * SYN appears to be valid; create compressed TCP state
936 * for syncache, or perform t/tcp connection.
937 */
938 if (so->so_qlen <= so->so_qlimit) {
939#ifdef TCPDEBUG
940 if (so->so_options & SO_DEBUG)
941 tcp_trace(TA_INPUT, ostate, tp,
942 (void *)tcp_saveipgen, &tcp_savetcp, 0);
943#endif
944 tcp_dooptions(tp, &to, optp, optlen, 1, th);
945 if (!syncache_add(&inc, &to, th, &so, m))
946 goto drop;
947 if (so == NULL) {
948 /*
949 * Entry added to syncache, mbuf used to
950 * send SYN,ACK packet.
951 */
952 KASSERT(headlocked, ("headlocked"));
953 INP_UNLOCK(inp);
954 INP_INFO_WUNLOCK(&tcbinfo);
955 return;
956 }
957 /*
958 * Segment passed TAO tests.
959 */
960 INP_UNLOCK(inp);
961 inp = sotoinpcb(so);
962 INP_LOCK(inp);
963 tp = intotcpcb(inp);
964 tp->snd_wnd = tiwin;
965 tp->t_starttime = ticks;
966 tp->t_state = TCPS_ESTABLISHED;
967
968 /*
969 * T/TCP logic:
970 * If there is a FIN or if there is data, then
971 * delay SYN,ACK(SYN) in the hope of piggy-backing
972 * it on a response segment. Otherwise must send
973 * ACK now in case the other side is slow starting.
974 */
975 if (thflags & TH_FIN || tlen != 0)
976 tp->t_flags |= (TF_DELACK | TF_NEEDSYN);
977 else
978 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
979 tcpstat.tcps_connects++;
980 soisconnected(so);
981 goto trimthenstep6;
982 }
983 goto drop;
984 }
985after_listen:
986
987 /* XXX temp debugging */
988 /* should not happen - syncache should pick up these connections */
989 if (tp->t_state == TCPS_LISTEN)
990 panic("tcp_input: TCPS_LISTEN");
991
992 /*
993 * This is the second part of the MSS DoS prevention code (after
994 * minmss on the sending side) and it deals with too many too small
995 * tcp packets in a too short timeframe (1 second).
996 *
997 * For every full second we count the number of received packets
998 * and bytes. If we get a lot of packets per second for this connection
999 * (tcp_minmssoverload) we take a closer look at it and compute the
1000 * average packet size for the past second. If that is less than
1001 * tcp_minmss we get too many packets with very small payload which
1002 * is not good and burdens our system (and every packet generates
 1003	 * a wakeup to the process connected to our socket). We can reasonably
 1004	 * expect this to be a small-packet DoS attack aimed at exhausting our
 1005	 * CPU cycles.
1006 *
1007 * Care has to be taken for the minimum packet overload value. This
1008 * value defines the minimum number of packets per second before we
1009 * start to worry. This must not be too low to avoid killing for
1010 * example interactive connections with many small packets like
1011 * telnet or SSH.
1012 *
1013 * Setting either tcp_minmssoverload or tcp_minmss to "0" disables
1014 * this check.
1015 *
1016 * Account for packet if payload packet, skip over ACK, etc.
1017 */
1018 if (tcp_minmss && tcp_minmssoverload &&
1019 tp->t_state == TCPS_ESTABLISHED && tlen > 0) {
1020 if (tp->rcv_second > ticks) {
1021 tp->rcv_pps++;
1022 tp->rcv_byps += tlen + off;
1023 if (tp->rcv_pps > tcp_minmssoverload) {
1024 if ((tp->rcv_byps / tp->rcv_pps) < tcp_minmss) {
1025 printf("too many small tcp packets from "
1026 "%s:%u, av. %lubyte/packet, "
1027 "dropping connection\n",
1028#ifdef INET6
1029 isipv6 ?
1030 ip6_sprintf(&inp->inp_inc.inc6_faddr) :
1031#endif
1032 inet_ntoa(inp->inp_inc.inc_faddr),
1033 inp->inp_inc.inc_fport,
1034 tp->rcv_byps / tp->rcv_pps);
1035 tp = tcp_drop(tp, ECONNRESET);
1036 tcpstat.tcps_minmssdrops++;
1037 goto drop;
1038 }
1039 }
1040 } else {
1041 tp->rcv_second = ticks + hz;
1042 tp->rcv_pps = 1;
1043 tp->rcv_byps = tlen + off;
1044 }
1045 }
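	/*
	 * Worked example of the check above (illustrative numbers, not
	 * values taken from this file).  Suppose tcp_minmssoverload == 1000
	 * and tcp_minmss == 216.  A peer sending 1500 segments of 40 bytes
	 * (payload plus TCP header) within one second gives:
	 *	rcv_pps  = 1500			exceeds 1000: look closer
	 *	rcv_byps = 1500 * 40 = 60000
	 *	rcv_byps / rcv_pps = 40		below 216: the connection is
	 *					dropped with ECONNRESET
	 */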
1046
1047 /*
1048 * Segment received on connection.
1049 * Reset idle time and keep-alive timer.
1050 */
1051 tp->t_rcvtime = ticks;
1052 if (TCPS_HAVEESTABLISHED(tp->t_state))
1053 callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
1054
1055 /*
1056 * Process options only when we get SYN/ACK back. The SYN case
1057 * for incoming connections is handled in tcp_syncache.
1058 * XXX this is traditional behavior, may need to be cleaned up.
1059 */
1060 tcp_dooptions(tp, &to, optp, optlen, thflags & TH_SYN, th);
1061 if (thflags & TH_SYN) {
1062 if (to.to_flags & TOF_SCALE) {
1063 tp->t_flags |= TF_RCVD_SCALE;
1064 tp->requested_s_scale = to.to_requested_s_scale;
1065 }
1066 if (to.to_flags & TOF_TS) {
1067 tp->t_flags |= TF_RCVD_TSTMP;
1068 tp->ts_recent = to.to_tsval;
1069 tp->ts_recent_age = ticks;
1070 }
1071 if (to.to_flags & (TOF_CC|TOF_CCNEW))
1072 tp->t_flags |= TF_RCVD_CC;
1073 if (to.to_flags & TOF_MSS)
1074 tcp_mss(tp, to.to_mss);
1075 if (tp->sack_enable) {
1076 if (!(to.to_flags & TOF_SACK))
1077 tp->sack_enable = 0;
1078 else
1079 tp->t_flags |= TF_SACK_PERMIT;
1080 }
1081
1082 }
1083
1084 if (tp->sack_enable) {
1085 /* Delete stale (cumulatively acked) SACK holes */
1086 tcp_del_sackholes(tp, th);
1087 tp->rcv_laststart = th->th_seq; /* last recv'd segment*/
1088 tp->rcv_lastend = th->th_seq + tlen;
1089 }
1090
1091 /*
1092 * Header prediction: check for the two common cases
1093 * of a uni-directional data xfer. If the packet has
1094 * no control flags, is in-sequence, the window didn't
1095 * change and we're not retransmitting, it's a
1096 * candidate. If the length is zero and the ack moved
1097 * forward, we're the sender side of the xfer. Just
1098 * free the data acked & wake any higher level process
1099 * that was blocked waiting for space. If the length
1100 * is non-zero and the ack didn't move, we're the
1101 * receiver side. If we're getting packets in-order
1102 * (the reassembly queue is empty), add the data to
1103 * the socket buffer and note that we need a delayed ack.
1104 * Make sure that the hidden state-flags are also off.
1105 * Since we check for TCPS_ESTABLISHED above, it can only
1106 * be TH_NEEDSYN.
1107 */
1108 if (tp->t_state == TCPS_ESTABLISHED &&
1109 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1110 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1111 ((to.to_flags & TOF_TS) == 0 ||
1112 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
1113 /*
1114 * Using the CC option is compulsory if once started:
1115 * the segment is OK if no T/TCP was negotiated or
1116 * if the segment has a CC option equal to CCrecv
1117 */
1118 ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) != (TF_REQ_CC|TF_RCVD_CC) ||
1119 ((to.to_flags & TOF_CC) != 0 && to.to_cc == tp->cc_recv)) &&
1120 th->th_seq == tp->rcv_nxt &&
1121 tiwin && tiwin == tp->snd_wnd &&
1122 tp->snd_nxt == tp->snd_max) {
1123
1124 /*
1125 * If last ACK falls within this segment's sequence numbers,
1126 * record the timestamp.
1127 * NOTE that the test is modified according to the latest
1128 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1129 */
1130 if ((to.to_flags & TOF_TS) != 0 &&
1131 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1132 tp->ts_recent_age = ticks;
1133 tp->ts_recent = to.to_tsval;
1134 }
1135
1136 if (tlen == 0) {
1137 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1138 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1139 tp->snd_cwnd >= tp->snd_wnd &&
1140 ((!tcp_do_newreno && !tp->sack_enable &&
1141 tp->t_dupacks < tcprexmtthresh) ||
1142 ((tcp_do_newreno || tp->sack_enable) &&
1143 !IN_FASTRECOVERY(tp)))) {
1144 KASSERT(headlocked, ("headlocked"));
1145 INP_INFO_WUNLOCK(&tcbinfo);
1146 /*
1147 * this is a pure ack for outstanding data.
1148 */
1149 ++tcpstat.tcps_predack;
1150 /*
1151 * "bad retransmit" recovery
1152 */
1153 if (tp->t_rxtshift == 1 &&
1154 ticks < tp->t_badrxtwin) {
1155 ++tcpstat.tcps_sndrexmitbad;
1156 tp->snd_cwnd = tp->snd_cwnd_prev;
1157 tp->snd_ssthresh =
1158 tp->snd_ssthresh_prev;
1159 tp->snd_recover = tp->snd_recover_prev;
1160 if (tp->t_flags & TF_WASFRECOVERY)
1161 ENTER_FASTRECOVERY(tp);
1162 tp->snd_nxt = tp->snd_max;
1163 tp->t_badrxtwin = 0;
1164 }
1165
1166 /*
1167 * Recalculate the transmit timer / rtt.
1168 *
1169 * Some boxes send broken timestamp replies
1170 * during the SYN+ACK phase, ignore
1171 * timestamps of 0 or we could calculate a
1172 * huge RTT and blow up the retransmit timer.
1173 */
1174 if ((to.to_flags & TOF_TS) != 0 &&
1175 to.to_tsecr) {
1176 tcp_xmit_timer(tp,
1177 ticks - to.to_tsecr + 1);
1178 } else if (tp->t_rtttime &&
1179 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1180 tcp_xmit_timer(tp,
1181 ticks - tp->t_rtttime);
1182 }
1183 tcp_xmit_bandwidth_limit(tp, th->th_ack);
1184 acked = th->th_ack - tp->snd_una;
1185 tcpstat.tcps_rcvackpack++;
1186 tcpstat.tcps_rcvackbyte += acked;
1187 sbdrop(&so->so_snd, acked);
1188 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1189 SEQ_LEQ(th->th_ack, tp->snd_recover))
1190 tp->snd_recover = th->th_ack - 1;
1191 tp->snd_una = th->th_ack;
1192 /*
1193 * pull snd_wl2 up to prevent seq wrap relative
1194 * to th_ack.
1195 */
1196 tp->snd_wl2 = th->th_ack;
1197 tp->t_dupacks = 0;
1198 m_freem(m);
1199 ND6_HINT(tp); /* some progress has been done */
1200
1201 /*
1202 * If all outstanding data are acked, stop
1203 * retransmit timer, otherwise restart timer
1204 * using current (possibly backed-off) value.
1205 * If process is waiting for space,
1206 * wakeup/selwakeup/signal. If data
1207 * are ready to send, let tcp_output
1208 * decide between more output or persist.
1209
1210#ifdef TCPDEBUG
1211 if (so->so_options & SO_DEBUG)
1212 tcp_trace(TA_INPUT, ostate, tp,
1213 (void *)tcp_saveipgen,
1214 &tcp_savetcp, 0);
1215#endif
1216 */
1217 if (tp->snd_una == tp->snd_max)
1218 callout_stop(tp->tt_rexmt);
1219 else if (!callout_active(tp->tt_persist))
1220 callout_reset(tp->tt_rexmt,
1221 tp->t_rxtcur,
1222 tcp_timer_rexmt, tp);
1223
1224 sowwakeup(so);
1225 if (so->so_snd.sb_cc)
1226 (void) tcp_output(tp);
1227 goto check_delack;
1228 }
1229 } else if (th->th_ack == tp->snd_una &&
1230 LIST_EMPTY(&tp->t_segq) &&
1231 tlen <= sbspace(&so->so_rcv)) {
1232 KASSERT(headlocked, ("headlocked"));
1233 INP_INFO_WUNLOCK(&tcbinfo);
1234 /*
1235 * this is a pure, in-sequence data packet
1236 * with nothing on the reassembly queue and
1237 * we have enough buffer space to take it.
1238 */
1239 /* Clean receiver SACK report if present */
1240 if (tp->sack_enable && tp->rcv_numsacks)
1241 tcp_clean_sackreport(tp);
1242 ++tcpstat.tcps_preddat;
1243 tp->rcv_nxt += tlen;
1244 /*
1245 * Pull snd_wl1 up to prevent seq wrap relative to
1246 * th_seq.
1247 */
1248 tp->snd_wl1 = th->th_seq;
1249 /*
1250 * Pull rcv_up up to prevent seq wrap relative to
1251 * rcv_nxt.
1252 */
1253 tp->rcv_up = tp->rcv_nxt;
1254 tcpstat.tcps_rcvpack++;
1255 tcpstat.tcps_rcvbyte += tlen;
1256 ND6_HINT(tp); /* some progress has been done */
1257 /*
1258#ifdef TCPDEBUG
1259 if (so->so_options & SO_DEBUG)
1260 tcp_trace(TA_INPUT, ostate, tp,
1261 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1262#endif
1263 * Add data to socket buffer.
1264 */
374 return (flags);
375}
376
377/*
378 * TCP input routine, follows pages 65-76 of the
379 * protocol specification dated September, 1981 very closely.
380 */
381#ifdef INET6
382int
383tcp6_input(mp, offp, proto)
384 struct mbuf **mp;
385 int *offp, proto;
386{
387 register struct mbuf *m = *mp;
388 struct in6_ifaddr *ia6;
389
390 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
391
392 /*
393 * draft-itojun-ipv6-tcp-to-anycast
394 * better place to put this in?
395 */
396 ia6 = ip6_getdstifaddr(m);
397 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
398 struct ip6_hdr *ip6;
399
400 ip6 = mtod(m, struct ip6_hdr *);
401 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
402 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
403 return IPPROTO_DONE;
404 }
405
406 tcp_input(m, *offp);
407 return IPPROTO_DONE;
408}
409#endif
410
411void
412tcp_input(m, off0)
413 register struct mbuf *m;
414 int off0;
415{
416 register struct tcphdr *th;
417 register struct ip *ip = NULL;
418 register struct ipovly *ipov;
419 register struct inpcb *inp = NULL;
420 u_char *optp = NULL;
421 int optlen = 0;
422 int len, tlen, off;
423 int drop_hdrlen;
424 register struct tcpcb *tp = 0;
425 register int thflags;
426 struct socket *so = 0;
427 int todrop, acked, ourfinisacked, needoutput = 0;
428 u_long tiwin;
429 struct tcpopt to; /* options in this segment */
430 struct rmxp_tao tao; /* our TAO cache entry */
431 int headlocked = 0;
432 struct sockaddr_in *next_hop = NULL;
433 int rstreason; /* For badport_bandlim accounting purposes */
434
435 struct ip6_hdr *ip6 = NULL;
436#ifdef INET6
437 int isipv6;
438#else
439 const int isipv6 = 0;
440#endif
441
442#ifdef TCPDEBUG
443 /*
444 * The size of tcp_saveipgen must be the size of the max ip header,
445 * now IPv6.
446 */
447 u_char tcp_saveipgen[40];
448 struct tcphdr tcp_savetcp;
449 short ostate = 0;
450#endif
451
452 /* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
453 next_hop = m_claim_next(m, PACKET_TAG_IPFORWARD);
454#ifdef INET6
455 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
456#endif
457 bzero(&tao, sizeof(tao));
458 bzero((char *)&to, sizeof(to));
459
460 tcpstat.tcps_rcvtotal++;
461
462 if (isipv6) {
463#ifdef INET6
464 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
465 ip6 = mtod(m, struct ip6_hdr *);
466 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
467 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
468 tcpstat.tcps_rcvbadsum++;
469 goto drop;
470 }
471 th = (struct tcphdr *)((caddr_t)ip6 + off0);
472
473 /*
474 * Be proactive about unspecified IPv6 address in source.
475 * As we use all-zero to indicate unbounded/unconnected pcb,
476 * unspecified IPv6 address can be used to confuse us.
477 *
478 * Note that packets with unspecified IPv6 destination is
479 * already dropped in ip6_input.
480 */
481 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
482 /* XXX stat */
483 goto drop;
484 }
485#else
486 th = NULL; /* XXX: avoid compiler warning */
487#endif
488 } else {
489 /*
490 * Get IP and TCP header together in first mbuf.
491 * Note: IP leaves IP header in first mbuf.
492 */
493 if (off0 > sizeof (struct ip)) {
494 ip_stripoptions(m, (struct mbuf *)0);
495 off0 = sizeof(struct ip);
496 }
497 if (m->m_len < sizeof (struct tcpiphdr)) {
498 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) {
499 tcpstat.tcps_rcvshort++;
500 return;
501 }
502 }
503 ip = mtod(m, struct ip *);
504 ipov = (struct ipovly *)ip;
505 th = (struct tcphdr *)((caddr_t)ip + off0);
506 tlen = ip->ip_len;
507
508 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
509 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
510 th->th_sum = m->m_pkthdr.csum_data;
511 else
512 th->th_sum = in_pseudo(ip->ip_src.s_addr,
513 ip->ip_dst.s_addr,
514 htonl(m->m_pkthdr.csum_data +
515 ip->ip_len +
516 IPPROTO_TCP));
517 th->th_sum ^= 0xffff;
518#ifdef TCPDEBUG
519 ipov->ih_len = (u_short)tlen;
520 ipov->ih_len = htons(ipov->ih_len);
521#endif
522 } else {
523 /*
524 * Checksum extended TCP header and data.
525 */
526 len = sizeof (struct ip) + tlen;
527 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
528 ipov->ih_len = (u_short)tlen;
529 ipov->ih_len = htons(ipov->ih_len);
530 th->th_sum = in_cksum(m, len);
531 }
532 if (th->th_sum) {
533 tcpstat.tcps_rcvbadsum++;
534 goto drop;
535 }
536#ifdef INET6
537 /* Re-initialization for later version check */
538 ip->ip_v = IPVERSION;
539#endif
540 }
541
542 /*
543 * Check that TCP offset makes sense,
544 * pull out TCP options and adjust length. XXX
545 */
546 off = th->th_off << 2;
547 if (off < sizeof (struct tcphdr) || off > tlen) {
548 tcpstat.tcps_rcvbadoff++;
549 goto drop;
550 }
551 tlen -= off; /* tlen is used instead of ti->ti_len */
552 if (off > sizeof (struct tcphdr)) {
553 if (isipv6) {
554#ifdef INET6
555 IP6_EXTHDR_CHECK(m, off0, off, );
556 ip6 = mtod(m, struct ip6_hdr *);
557 th = (struct tcphdr *)((caddr_t)ip6 + off0);
558#endif
559 } else {
560 if (m->m_len < sizeof(struct ip) + off) {
561 if ((m = m_pullup(m, sizeof (struct ip) + off))
562 == 0) {
563 tcpstat.tcps_rcvshort++;
564 return;
565 }
566 ip = mtod(m, struct ip *);
567 ipov = (struct ipovly *)ip;
568 th = (struct tcphdr *)((caddr_t)ip + off0);
569 }
570 }
571 optlen = off - sizeof (struct tcphdr);
572 optp = (u_char *)(th + 1);
573 }
574 thflags = th->th_flags;
575
576#ifdef TCP_DROP_SYNFIN
577 /*
578 * If the drop_synfin option is enabled, drop all packets with
579 * both the SYN and FIN bits set. This prevents e.g. nmap from
580 * identifying the TCP/IP stack.
581 *
582 * This is a violation of the TCP specification.
583 */
584 if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN))
585 goto drop;
586#endif
587
588 /*
589 * Convert TCP protocol specific fields to host format.
590 */
591 th->th_seq = ntohl(th->th_seq);
592 th->th_ack = ntohl(th->th_ack);
593 th->th_win = ntohs(th->th_win);
594 th->th_urp = ntohs(th->th_urp);
595
596 /*
597 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
598 * until after ip6_savecontrol() is called and before other functions
599 * which don't want those proto headers.
600 * Because ip6_savecontrol() is going to parse the mbuf to
601 * search for data to be passed up to user-land, it wants mbuf
602 * parameters to be unchanged.
603 * XXX: the call of ip6_savecontrol() has been obsoleted based on
604 * latest version of the advanced API (20020110).
605 */
606 drop_hdrlen = off0 + off;
607
608 /*
609 * Locate pcb for segment.
610 */
611 INP_INFO_WLOCK(&tcbinfo);
612 headlocked = 1;
613findpcb:
614 /* IPFIREWALL_FORWARD section */
615 if (next_hop != NULL && isipv6 == 0) { /* IPv6 support is not yet */
616 /*
617 * Transparently forwarded. Pretend to be the destination.
618 * already got one like this?
619 */
620 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
621 ip->ip_dst, th->th_dport,
622 0, m->m_pkthdr.rcvif);
623 if (!inp) {
624 /* It's new. Try find the ambushing socket. */
625 inp = in_pcblookup_hash(&tcbinfo,
626 ip->ip_src, th->th_sport,
627 next_hop->sin_addr,
628 next_hop->sin_port ?
629 ntohs(next_hop->sin_port) :
630 th->th_dport,
631 1, m->m_pkthdr.rcvif);
632 }
633 } else {
634 if (isipv6) {
635#ifdef INET6
636 inp = in6_pcblookup_hash(&tcbinfo,
637 &ip6->ip6_src, th->th_sport,
638 &ip6->ip6_dst, th->th_dport,
639 1, m->m_pkthdr.rcvif);
640#endif
641 } else
642 inp = in_pcblookup_hash(&tcbinfo,
643 ip->ip_src, th->th_sport,
644 ip->ip_dst, th->th_dport,
645 1, m->m_pkthdr.rcvif);
646 }
647
648#if defined(IPSEC) || defined(FAST_IPSEC)
649#ifdef INET6
650 if (isipv6) {
651 if (inp != NULL && ipsec6_in_reject(m, inp)) {
652#ifdef IPSEC
653 ipsec6stat.in_polvio++;
654#endif
655 goto drop;
656 }
657 } else
658#endif /* INET6 */
659 if (inp != NULL && ipsec4_in_reject(m, inp)) {
660#ifdef IPSEC
661 ipsecstat.in_polvio++;
662#endif
663 goto drop;
664 }
665#endif /*IPSEC || FAST_IPSEC*/
666
667 /*
668 * If the state is CLOSED (i.e., TCB does not exist) then
669 * all data in the incoming segment is discarded.
670 * If the TCB exists but is in CLOSED state, it is embryonic,
671 * but should either do a listen or a connect soon.
672 */
673 if (inp == NULL) {
674 if (log_in_vain) {
675#ifdef INET6
676 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
677#else
678 char dbuf[4*sizeof "123"], sbuf[4*sizeof "123"];
679#endif
680
681 if (isipv6) {
682#ifdef INET6
683 strcpy(dbuf, "[");
684 strcpy(sbuf, "[");
685 strcat(dbuf, ip6_sprintf(&ip6->ip6_dst));
686 strcat(sbuf, ip6_sprintf(&ip6->ip6_src));
687 strcat(dbuf, "]");
688 strcat(sbuf, "]");
689#endif
690 } else {
691 strcpy(dbuf, inet_ntoa(ip->ip_dst));
692 strcpy(sbuf, inet_ntoa(ip->ip_src));
693 }
694 switch (log_in_vain) {
695 case 1:
696 if ((thflags & TH_SYN) == 0)
697 break;
698 /* FALLTHROUGH */
699 case 2:
700 log(LOG_INFO,
701 "Connection attempt to TCP %s:%d "
702 "from %s:%d flags:0x%02x\n",
703 dbuf, ntohs(th->th_dport), sbuf,
704 ntohs(th->th_sport), thflags);
705 break;
706 default:
707 break;
708 }
709 }
710 if (blackhole) {
711 switch (blackhole) {
712 case 1:
713 if (thflags & TH_SYN)
714 goto drop;
715 break;
716 case 2:
717 goto drop;
718 default:
719 goto drop;
720 }
721 }
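 /*
  * blackhole == 1 silently drops only SYN segments; any other
  * non-zero value drops every segment to a closed port, so no
  * RST leaks port state to a scanner.
  */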
722 rstreason = BANDLIM_RST_CLOSEDPORT;
723 goto dropwithreset;
724 }
725 INP_LOCK(inp);
726 if (inp->inp_vflag & INP_TIMEWAIT) {
727 /*
728 * The only option of relevance is TOF_CC, and only if
729 * present in a SYN segment. See tcp_timewait().
730 */
731 if (thflags & TH_SYN)
732 tcp_dooptions((struct tcpcb *)NULL, &to, optp, optlen, 1, th);
733 if (tcp_timewait((struct tcptw *)inp->inp_ppcb,
734 &to, th, m, tlen))
735 goto findpcb;
736 /*
737 * tcp_timewait unlocks inp.
738 */
739 INP_INFO_WUNLOCK(&tcbinfo);
740 return;
741 }
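 /*
  * A non-zero return from tcp_timewait() means it disposed of
  * the time-wait state and the segment should be looked up
  * again (e.g. a new SYN legitimately recycling the old
  * connection), hence the goto findpcb above.
  */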
742 tp = intotcpcb(inp);
743 if (tp == 0) {
744 INP_UNLOCK(inp);
745 rstreason = BANDLIM_RST_CLOSEDPORT;
746 goto dropwithreset;
747 }
748 if (tp->t_state == TCPS_CLOSED)
749 goto drop;
750
751 /* Unscale the window into a 32-bit value. */
752 if ((thflags & TH_SYN) == 0)
753 tiwin = th->th_win << tp->snd_scale;
754 else
755 tiwin = th->th_win;
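 /*
  * Per RFC 1323 the window field of any segment carrying SYN
  * is never scaled, so snd_scale is applied only once the
  * handshake has completed.
  */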
756
757#ifdef MAC
758 INP_LOCK_ASSERT(inp);
759 if (mac_check_inpcb_deliver(inp, m))
760 goto drop;
761#endif
762 so = inp->inp_socket;
763#ifdef TCPDEBUG
764 if (so->so_options & SO_DEBUG) {
765 ostate = tp->t_state;
766 if (isipv6)
767 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
768 else
769 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
770 tcp_savetcp = *th;
771 }
772#endif
773 if (so->so_options & SO_ACCEPTCONN) {
774 struct in_conninfo inc;
775
776#ifdef INET6
777 inc.inc_isipv6 = isipv6;
778#endif
779 if (isipv6) {
780 inc.inc6_faddr = ip6->ip6_src;
781 inc.inc6_laddr = ip6->ip6_dst;
782 } else {
783 inc.inc_faddr = ip->ip_src;
784 inc.inc_laddr = ip->ip_dst;
785 }
786 inc.inc_fport = th->th_sport;
787 inc.inc_lport = th->th_dport;
788
789 /*
790 * If the state is LISTEN then ignore segment if it contains
791 * a RST. If the segment contains an ACK then it is bad and
792 * send a RST. If it does not contain a SYN then it is not
793 * interesting; drop it.
794 *
795 * If the state is SYN_RECEIVED (syncache) and seg contains
796 * an ACK, but not for our SYN/ACK, send a RST. If the seg
797 * contains a RST, check the sequence number to see if it
798 * is a valid reset segment.
799 */
800 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
801 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
802 if (!syncache_expand(&inc, th, &so, m)) {
803 /*
804 * No syncache entry, or ACK was not
805 * for our SYN/ACK. Send a RST.
806 */
807 tcpstat.tcps_badsyn++;
808 rstreason = BANDLIM_RST_OPENPORT;
809 goto dropwithreset;
810 }
811 if (so == NULL) {
812 /*
813 * Could not complete 3-way handshake,
814 * connection is being closed down, and
815 * syncache will free mbuf.
816 */
817 INP_UNLOCK(inp);
818 INP_INFO_WUNLOCK(&tcbinfo);
819 return;
820 }
821 /*
822 * Socket is created in state SYN_RECEIVED.
823 * Continue processing segment.
824 */
825 INP_UNLOCK(inp);
826 inp = sotoinpcb(so);
827 INP_LOCK(inp);
828 tp = intotcpcb(inp);
829 /*
830 * This is what would have happened in
831 * tcp_output() when the SYN,ACK was sent.
832 */
833 tp->snd_up = tp->snd_una;
834 tp->snd_max = tp->snd_nxt = tp->iss + 1;
835 tp->last_ack_sent = tp->rcv_nxt;
836 /*
837 * RFC1323: The window in SYN & SYN/ACK
838 * segments is never scaled.
839 */
840 tp->snd_wnd = tiwin; /* unscaled */
841 goto after_listen;
842 }
843 if (thflags & TH_RST) {
844 syncache_chkrst(&inc, th);
845 goto drop;
846 }
847 if (thflags & TH_ACK) {
848 syncache_badack(&inc);
849 tcpstat.tcps_badsyn++;
850 rstreason = BANDLIM_RST_OPENPORT;
851 goto dropwithreset;
852 }
853 goto drop;
854 }
855
856 /*
857 * Segment's flags are (SYN) or (SYN|FIN).
858 */
859#ifdef INET6
860 /*
861 * If deprecated address is forbidden,
862 * we do not accept SYN to deprecated interface
863 * address to prevent any new inbound connection from
864 * getting established.
 865 * When we do not accept the SYN, we send a TCP RST
 866 * with the deprecated source address (instead of
 867 * dropping the segment). This is a compromise: it is
 868 * better for the peer to receive a RST, since the RST
 869 * will be the final packet of the exchange.
870 *
871 * If we do not forbid deprecated addresses, we accept
872 * the SYN packet. RFC2462 does not suggest dropping
873 * SYN in this case.
 874 * Reading RFC2462 5.5.4 closely, it says this:
875 * 1. use of deprecated addr with existing
876 * communication is okay - "SHOULD continue to be
877 * used"
878 * 2. use of it with new communication:
879 * (2a) "SHOULD NOT be used if alternate address
880 * with sufficient scope is available"
881 * (2b) nothing mentioned otherwise.
882 * Here we fall into (2b) case as we have no choice in
883 * our source address selection - we must obey the peer.
884 *
885 * The wording in RFC2462 is confusing, and there are
 886 * multiple descriptions of deprecated address
887 * handling - worse, they are not exactly the same.
888 * I believe 5.5.4 is the best one, so we follow 5.5.4.
889 */
890 if (isipv6 && !ip6_use_deprecated) {
891 struct in6_ifaddr *ia6;
892
893 if ((ia6 = ip6_getdstifaddr(m)) &&
894 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
895 INP_UNLOCK(inp);
896 tp = NULL;
897 rstreason = BANDLIM_RST_OPENPORT;
898 goto dropwithreset;
899 }
900 }
901#endif
902 /*
 903 * If it is from this socket, drop it; it must be forged.
904 * Don't bother responding if the destination was a broadcast.
905 */
906 if (th->th_dport == th->th_sport) {
907 if (isipv6) {
908 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
909 &ip6->ip6_src))
910 goto drop;
911 } else {
912 if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
913 goto drop;
914 }
915 }
916 /*
917 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
918 *
919 * Note that it is quite possible to receive unicast
920 * link-layer packets with a broadcast IP address. Use
921 * in_broadcast() to find them.
922 */
923 if (m->m_flags & (M_BCAST|M_MCAST))
924 goto drop;
925 if (isipv6) {
926 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
927 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
928 goto drop;
929 } else {
930 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
931 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
932 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
933 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
934 goto drop;
935 }
936 /*
937 * SYN appears to be valid; create compressed TCP state
938 * for syncache, or perform t/tcp connection.
939 */
940 if (so->so_qlen <= so->so_qlimit) {
941#ifdef TCPDEBUG
942 if (so->so_options & SO_DEBUG)
943 tcp_trace(TA_INPUT, ostate, tp,
944 (void *)tcp_saveipgen, &tcp_savetcp, 0);
945#endif
946 tcp_dooptions(tp, &to, optp, optlen, 1, th);
947 if (!syncache_add(&inc, &to, th, &so, m))
948 goto drop;
949 if (so == NULL) {
950 /*
951 * Entry added to syncache, mbuf used to
952 * send SYN,ACK packet.
953 */
954 KASSERT(headlocked, ("headlocked"));
955 INP_UNLOCK(inp);
956 INP_INFO_WUNLOCK(&tcbinfo);
957 return;
958 }
959 /*
960 * Segment passed TAO tests.
961 */
962 INP_UNLOCK(inp);
963 inp = sotoinpcb(so);
964 INP_LOCK(inp);
965 tp = intotcpcb(inp);
966 tp->snd_wnd = tiwin;
967 tp->t_starttime = ticks;
968 tp->t_state = TCPS_ESTABLISHED;
969
970 /*
971 * T/TCP logic:
972 * If there is a FIN or if there is data, then
973 * delay SYN,ACK(SYN) in the hope of piggy-backing
974 * it on a response segment. Otherwise must send
975 * ACK now in case the other side is slow starting.
976 */
977 if (thflags & TH_FIN || tlen != 0)
978 tp->t_flags |= (TF_DELACK | TF_NEEDSYN);
979 else
980 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
981 tcpstat.tcps_connects++;
982 soisconnected(so);
983 goto trimthenstep6;
984 }
985 goto drop;
986 }
987after_listen:
988
989 /* XXX temp debugging */
990 /* should not happen - syncache should pick up these connections */
991 if (tp->t_state == TCPS_LISTEN)
992 panic("tcp_input: TCPS_LISTEN");
993
994 /*
995 * This is the second part of the MSS DoS prevention code (after
996 * minmss on the sending side) and it deals with too many too small
997 * tcp packets in a too short timeframe (1 second).
998 *
999 * For every full second we count the number of received packets
1000 * and bytes. If we get a lot of packets per second for this connection
1001 * (tcp_minmssoverload) we take a closer look at it and compute the
1002 * average packet size for the past second. If that is less than
1003 * tcp_minmss we get too many packets with very small payload which
1004 * is not good and burdens our system (and every packet generates
 1005 * a wakeup to the process connected to our socket). We can reasonably
 1006 * expect this to be a small packet DoS attack meant to exhaust
 1007 * our CPU cycles.
1008 *
1009 * Care has to be taken for the minimum packet overload value. This
1010 * value defines the minimum number of packets per second before we
 1011 * start to worry. This must not be set too low, or we would kill,
 1012 * for example, interactive connections with many small packets
 1013 * like telnet or SSH.
1014 *
1015 * Setting either tcp_minmssoverload or tcp_minmss to "0" disables
1016 * this check.
1017 *
 1018 * Only account for packets carrying payload; skip pure ACKs, etc.
1019 */
1020 if (tcp_minmss && tcp_minmssoverload &&
1021 tp->t_state == TCPS_ESTABLISHED && tlen > 0) {
1022 if (tp->rcv_second > ticks) {
1023 tp->rcv_pps++;
1024 tp->rcv_byps += tlen + off;
1025 if (tp->rcv_pps > tcp_minmssoverload) {
1026 if ((tp->rcv_byps / tp->rcv_pps) < tcp_minmss) {
1027 printf("too many small tcp packets from "
1028 "%s:%u, av. %lubyte/packet, "
1029 "dropping connection\n",
1030#ifdef INET6
1031 isipv6 ?
1032 ip6_sprintf(&inp->inp_inc.inc6_faddr) :
1033#endif
1034 inet_ntoa(inp->inp_inc.inc_faddr),
1035 inp->inp_inc.inc_fport,
1036 tp->rcv_byps / tp->rcv_pps);
1037 tp = tcp_drop(tp, ECONNRESET);
1038 tcpstat.tcps_minmssdrops++;
1039 goto drop;
1040 }
1041 }
1042 } else {
1043 tp->rcv_second = ticks + hz;
1044 tp->rcv_pps = 1;
1045 tp->rcv_byps = tlen + off;
1046 }
1047 }
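 /*
  * Example, assuming the default limits (tcp_minmss 216,
  * tcp_minmssoverload 1000): a peer must push more than 1000
  * segments within one second averaging fewer than 216 bytes
  * of header+payload each before the connection is reset.
  */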
1048
1049 /*
1050 * Segment received on connection.
1051 * Reset idle time and keep-alive timer.
1052 */
1053 tp->t_rcvtime = ticks;
1054 if (TCPS_HAVEESTABLISHED(tp->t_state))
1055 callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
1056
1057 /*
1058 * Process options only when we get SYN/ACK back. The SYN case
1059 * for incoming connections is handled in tcp_syncache.
1060 * XXX this is traditional behavior, may need to be cleaned up.
1061 */
1062 tcp_dooptions(tp, &to, optp, optlen, thflags & TH_SYN, th);
1063 if (thflags & TH_SYN) {
1064 if (to.to_flags & TOF_SCALE) {
1065 tp->t_flags |= TF_RCVD_SCALE;
1066 tp->requested_s_scale = to.to_requested_s_scale;
1067 }
1068 if (to.to_flags & TOF_TS) {
1069 tp->t_flags |= TF_RCVD_TSTMP;
1070 tp->ts_recent = to.to_tsval;
1071 tp->ts_recent_age = ticks;
1072 }
1073 if (to.to_flags & (TOF_CC|TOF_CCNEW))
1074 tp->t_flags |= TF_RCVD_CC;
1075 if (to.to_flags & TOF_MSS)
1076 tcp_mss(tp, to.to_mss);
1077 if (tp->sack_enable) {
1078 if (!(to.to_flags & TOF_SACK))
1079 tp->sack_enable = 0;
1080 else
1081 tp->t_flags |= TF_SACK_PERMIT;
1082 }
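 /*
  * I.e. if we offered SACK but the peer's SYN carried no
  * SACK permitted option, SACK is disabled for the rest of
  * this connection.
  */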
1083
1084 }
1085
1086 if (tp->sack_enable) {
1087 /* Delete stale (cumulatively acked) SACK holes */
1088 tcp_del_sackholes(tp, th);
1089 tp->rcv_laststart = th->th_seq; /* last recv'd segment*/
1090 tp->rcv_lastend = th->th_seq + tlen;
1091 }
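 /*
  * rcv_laststart/rcv_lastend record the bounds of this segment
  * so the receiver side can fold it into the SACK blocks it
  * reports back to the sender.
  */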
1092
1093 /*
1094 * Header prediction: check for the two common cases
1095 * of a uni-directional data xfer. If the packet has
1096 * no control flags, is in-sequence, the window didn't
1097 * change and we're not retransmitting, it's a
1098 * candidate. If the length is zero and the ack moved
1099 * forward, we're the sender side of the xfer. Just
1100 * free the data acked & wake any higher level process
1101 * that was blocked waiting for space. If the length
1102 * is non-zero and the ack didn't move, we're the
1103 * receiver side. If we're getting packets in-order
1104 * (the reassembly queue is empty), add the data to
1105 * the socket buffer and note that we need a delayed ack.
1106 * Make sure that the hidden state-flags are also off.
1107 * Since we check for TCPS_ESTABLISHED above, it can only
1108 * be TH_NEEDSYN.
1109 */
1110 if (tp->t_state == TCPS_ESTABLISHED &&
1111 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1112 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1113 ((to.to_flags & TOF_TS) == 0 ||
1114 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
1115 /*
1116 * Using the CC option is compulsory if once started:
1117 * the segment is OK if no T/TCP was negotiated or
1118 * if the segment has a CC option equal to CCrecv
1119 */
1120 ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) != (TF_REQ_CC|TF_RCVD_CC) ||
1121 ((to.to_flags & TOF_CC) != 0 && to.to_cc == tp->cc_recv)) &&
1122 th->th_seq == tp->rcv_nxt &&
1123 tiwin && tiwin == tp->snd_wnd &&
1124 tp->snd_nxt == tp->snd_max) {
1125
1126 /*
1127 * If last ACK falls within this segment's sequence numbers,
1128 * record the timestamp.
1129 * NOTE that the test is modified according to the latest
1130 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1131 */
1132 if ((to.to_flags & TOF_TS) != 0 &&
1133 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1134 tp->ts_recent_age = ticks;
1135 tp->ts_recent = to.to_tsval;
1136 }
1137
1138 if (tlen == 0) {
1139 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1140 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1141 tp->snd_cwnd >= tp->snd_wnd &&
1142 ((!tcp_do_newreno && !tp->sack_enable &&
1143 tp->t_dupacks < tcprexmtthresh) ||
1144 ((tcp_do_newreno || tp->sack_enable) &&
1145 !IN_FASTRECOVERY(tp)))) {
1146 KASSERT(headlocked, ("headlocked"));
1147 INP_INFO_WUNLOCK(&tcbinfo);
1148 /*
1149 * this is a pure ack for outstanding data.
1150 */
1151 ++tcpstat.tcps_predack;
1152 /*
1153 * "bad retransmit" recovery
1154 */
1155 if (tp->t_rxtshift == 1 &&
1156 ticks < tp->t_badrxtwin) {
1157 ++tcpstat.tcps_sndrexmitbad;
1158 tp->snd_cwnd = tp->snd_cwnd_prev;
1159 tp->snd_ssthresh =
1160 tp->snd_ssthresh_prev;
1161 tp->snd_recover = tp->snd_recover_prev;
1162 if (tp->t_flags & TF_WASFRECOVERY)
1163 ENTER_FASTRECOVERY(tp);
1164 tp->snd_nxt = tp->snd_max;
1165 tp->t_badrxtwin = 0;
1166 }
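 /*
  * "Bad retransmit" here means our first retransmit went out,
  * yet this ACK came back within t_badrxtwin, so the original
  * transmission very likely arrived and the cwnd/ssthresh
  * reduction is undone. The same recovery runs in the general
  * path under process_ACK.
  */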
1167
1168 /*
1169 * Recalculate the transmit timer / rtt.
1170 *
1171 * Some boxes send broken timestamp replies
1172 * during the SYN+ACK phase, ignore
1173 * timestamps of 0 or we could calculate a
1174 * huge RTT and blow up the retransmit timer.
1175 */
1176 if ((to.to_flags & TOF_TS) != 0 &&
1177 to.to_tsecr) {
1178 tcp_xmit_timer(tp,
1179 ticks - to.to_tsecr + 1);
1180 } else if (tp->t_rtttime &&
1181 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1182 tcp_xmit_timer(tp,
1183 ticks - tp->t_rtttime);
1184 }
1185 tcp_xmit_bandwidth_limit(tp, th->th_ack);
1186 acked = th->th_ack - tp->snd_una;
1187 tcpstat.tcps_rcvackpack++;
1188 tcpstat.tcps_rcvackbyte += acked;
1189 sbdrop(&so->so_snd, acked);
1190 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1191 SEQ_LEQ(th->th_ack, tp->snd_recover))
1192 tp->snd_recover = th->th_ack - 1;
1193 tp->snd_una = th->th_ack;
1194 /*
1195 * pull snd_wl2 up to prevent seq wrap relative
1196 * to th_ack.
1197 */
1198 tp->snd_wl2 = th->th_ack;
1199 tp->t_dupacks = 0;
1200 m_freem(m);
1201 ND6_HINT(tp); /* some progress has been done */
1202
1203 /*
1204 * If all outstanding data are acked, stop
1205 * retransmit timer, otherwise restart timer
1206 * using current (possibly backed-off) value.
1207 * If process is waiting for space,
1208 * wakeup/selwakeup/signal. If data
1209 * are ready to send, let tcp_output
1210 * decide between more output or persist.
1211
1212#ifdef TCPDEBUG
1213 if (so->so_options & SO_DEBUG)
1214 tcp_trace(TA_INPUT, ostate, tp,
1215 (void *)tcp_saveipgen,
1216 &tcp_savetcp, 0);
1217#endif
1218 */
1219 if (tp->snd_una == tp->snd_max)
1220 callout_stop(tp->tt_rexmt);
1221 else if (!callout_active(tp->tt_persist))
1222 callout_reset(tp->tt_rexmt,
1223 tp->t_rxtcur,
1224 tcp_timer_rexmt, tp);
1225
1226 sowwakeup(so);
1227 if (so->so_snd.sb_cc)
1228 (void) tcp_output(tp);
1229 goto check_delack;
1230 }
1231 } else if (th->th_ack == tp->snd_una &&
1232 LIST_EMPTY(&tp->t_segq) &&
1233 tlen <= sbspace(&so->so_rcv)) {
1234 KASSERT(headlocked, ("headlocked"));
1235 INP_INFO_WUNLOCK(&tcbinfo);
1236 /*
1237 * this is a pure, in-sequence data packet
1238 * with nothing on the reassembly queue and
1239 * we have enough buffer space to take it.
1240 */
1241 /* Clean receiver SACK report if present */
1242 if (tp->sack_enable && tp->rcv_numsacks)
1243 tcp_clean_sackreport(tp);
1244 ++tcpstat.tcps_preddat;
1245 tp->rcv_nxt += tlen;
1246 /*
1247 * Pull snd_wl1 up to prevent seq wrap relative to
1248 * th_seq.
1249 */
1250 tp->snd_wl1 = th->th_seq;
1251 /*
1252 * Pull rcv_up up to prevent seq wrap relative to
1253 * rcv_nxt.
1254 */
1255 tp->rcv_up = tp->rcv_nxt;
1256 tcpstat.tcps_rcvpack++;
1257 tcpstat.tcps_rcvbyte += tlen;
1258 ND6_HINT(tp); /* some progress has been done */
1259 /*
1260#ifdef TCPDEBUG
1261 if (so->so_options & SO_DEBUG)
1262 tcp_trace(TA_INPUT, ostate, tp,
1263 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1264#endif
1265 * Add data to socket buffer.
1266 */
1267 /* Unlocked read. */
1268 SOCKBUF_LOCK(&so->so_rcv);
 1269 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 1270 m_freem(m);
 1271 } else {
 1272 m_adj(m, drop_hdrlen); /* delayed header drop */
 1269 sbappendstream(&so->so_rcv, m);
 1273 sbappendstream_locked(&so->so_rcv, m);
 1274 }
 1271 sorwakeup(so);
 1275 sorwakeup_locked(so);
1272 if (DELAY_ACK(tp)) {
1273 tp->t_flags |= TF_DELACK;
1274 } else {
1275 tp->t_flags |= TF_ACKNOW;
1276 tcp_output(tp);
1277 }
1278 goto check_delack;
1279 }
1280 }
1281
1282 /*
1283 * Calculate amount of space in receive window,
1284 * and then do TCP input processing.
1285 * Receive window is amount of space in rcv queue,
1286 * but not less than advertised window.
1287 */
1288 { int win;
1289
1290 win = sbspace(&so->so_rcv);
1291 if (win < 0)
1292 win = 0;
1293 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1294 }
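 /*
  * Taking the larger of the free buffer space and
  * (rcv_adv - rcv_nxt) ensures we never shrink a window that
  * has already been advertised to the peer, which RFC 793
  * strongly discourages.
  */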
1295
1296 switch (tp->t_state) {
1297
1298 /*
1299 * If the state is SYN_RECEIVED:
1300 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1301 */
1302 case TCPS_SYN_RECEIVED:
1303 if ((thflags & TH_ACK) &&
1304 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1305 SEQ_GT(th->th_ack, tp->snd_max))) {
1306 rstreason = BANDLIM_RST_OPENPORT;
1307 goto dropwithreset;
1308 }
1309 break;
1310
1311 /*
1312 * If the state is SYN_SENT:
1313 * if seg contains an ACK, but not for our SYN, drop the input.
1314 * if seg contains a RST, then drop the connection.
1315 * if seg does not contain SYN, then drop it.
1316 * Otherwise this is an acceptable SYN segment
1317 * initialize tp->rcv_nxt and tp->irs
1318 * if seg contains ack then advance tp->snd_una
1319 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1320 * arrange for segment to be acked (eventually)
1321 * continue processing rest of data/controls, beginning with URG
1322 */
1323 case TCPS_SYN_SENT:
1324 if (tcp_do_rfc1644)
1325 tcp_hc_gettao(&inp->inp_inc, &tao);
1326
1327 if ((thflags & TH_ACK) &&
1328 (SEQ_LEQ(th->th_ack, tp->iss) ||
1329 SEQ_GT(th->th_ack, tp->snd_max))) {
1330 /*
1331 * If we have a cached CCsent for the remote host,
1332 * hence we haven't just crashed and restarted,
1333 * do not send a RST. This may be a retransmission
1334 * from the other side after our earlier ACK was lost.
1335 * Our new SYN, when it arrives, will serve as the
1336 * needed ACK.
1337 */
1338 if (tao.tao_ccsent != 0)
1339 goto drop;
1340 else {
1341 rstreason = BANDLIM_UNLIMITED;
1342 goto dropwithreset;
1343 }
1344 }
1345 if (thflags & TH_RST) {
1346 if (thflags & TH_ACK)
1347 tp = tcp_drop(tp, ECONNREFUSED);
1348 goto drop;
1349 }
1350 if ((thflags & TH_SYN) == 0)
1351 goto drop;
1352 tp->snd_wnd = th->th_win; /* initial send window */
1353 tp->cc_recv = to.to_cc; /* foreign CC */
1354
1355 tp->irs = th->th_seq;
1356 tcp_rcvseqinit(tp);
1357 if (thflags & TH_ACK) {
1358 /*
1359 * Our SYN was acked. If segment contains CC.ECHO
1360 * option, check it to make sure this segment really
1361 * matches our SYN. If not, just drop it as old
1362 * duplicate, but send an RST if we're still playing
1363 * by the old rules. If no CC.ECHO option, make sure
1364 * we don't get fooled into using T/TCP.
1365 */
1366 if (to.to_flags & TOF_CCECHO) {
1367 if (tp->cc_send != to.to_ccecho) {
1368 if (tao.tao_ccsent != 0)
1369 goto drop;
1370 else {
1371 rstreason = BANDLIM_UNLIMITED;
1372 goto dropwithreset;
1373 }
1374 }
1375 } else
1376 tp->t_flags &= ~TF_RCVD_CC;
1377 tcpstat.tcps_connects++;
1378 soisconnected(so);
1379#ifdef MAC
1380 SOCK_LOCK(so);
1381 mac_set_socket_peer_from_mbuf(m, so);
1382 SOCK_UNLOCK(so);
1383#endif
1384 /* Do window scaling on this connection? */
1385 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1386 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1387 tp->snd_scale = tp->requested_s_scale;
1388 tp->rcv_scale = tp->request_r_scale;
1389 }
1390 /* Segment is acceptable, update cache if undefined. */
1391 if (tao.tao_ccsent == 0 && tcp_do_rfc1644)
1392 tcp_hc_updatetao(&inp->inp_inc, TCP_HC_TAO_CCSENT, to.to_ccecho, 0);
1393
1394 tp->rcv_adv += tp->rcv_wnd;
1395 tp->snd_una++; /* SYN is acked */
1396 /*
1397 * If there's data, delay ACK; if there's also a FIN
1398 * ACKNOW will be turned on later.
1399 */
1400 if (DELAY_ACK(tp) && tlen != 0)
1401 callout_reset(tp->tt_delack, tcp_delacktime,
1402 tcp_timer_delack, tp);
1403 else
1404 tp->t_flags |= TF_ACKNOW;
1405 /*
1406 * Received <SYN,ACK> in SYN_SENT[*] state.
1407 * Transitions:
1408 * SYN_SENT --> ESTABLISHED
1409 * SYN_SENT* --> FIN_WAIT_1
1410 */
1411 tp->t_starttime = ticks;
1412 if (tp->t_flags & TF_NEEDFIN) {
1413 tp->t_state = TCPS_FIN_WAIT_1;
1414 tp->t_flags &= ~TF_NEEDFIN;
1415 thflags &= ~TH_SYN;
1416 } else {
1417 tp->t_state = TCPS_ESTABLISHED;
1418 callout_reset(tp->tt_keep, tcp_keepidle,
1419 tcp_timer_keep, tp);
1420 }
1421 } else {
1422 /*
1423 * Received initial SYN in SYN-SENT[*] state =>
1424 * simultaneous open. If segment contains CC option
1425 * and there is a cached CC, apply TAO test.
 1426 * If it succeeds, connection is half-synchronized.
1427 * Otherwise, do 3-way handshake:
1428 * SYN-SENT -> SYN-RECEIVED
1429 * SYN-SENT* -> SYN-RECEIVED*
1430 * If there was no CC option, clear cached CC value.
1431 */
1432 tp->t_flags |= TF_ACKNOW;
1433 callout_stop(tp->tt_rexmt);
1434 if (to.to_flags & TOF_CC) {
1435 if (tao.tao_cc != 0 &&
1436 CC_GT(to.to_cc, tao.tao_cc)) {
1437 /*
1438 * update cache and make transition:
1439 * SYN-SENT -> ESTABLISHED*
1440 * SYN-SENT* -> FIN-WAIT-1*
1441 */
1442 tao.tao_cc = to.to_cc;
1443 tcp_hc_updatetao(&inp->inp_inc,
1444 TCP_HC_TAO_CC, to.to_cc, 0);
1445 tp->t_starttime = ticks;
1446 if (tp->t_flags & TF_NEEDFIN) {
1447 tp->t_state = TCPS_FIN_WAIT_1;
1448 tp->t_flags &= ~TF_NEEDFIN;
1449 } else {
1450 tp->t_state = TCPS_ESTABLISHED;
1451 callout_reset(tp->tt_keep,
1452 tcp_keepidle,
1453 tcp_timer_keep,
1454 tp);
1455 }
1456 tp->t_flags |= TF_NEEDSYN;
1457 } else
1458 tp->t_state = TCPS_SYN_RECEIVED;
1459 } else {
1460 if (tcp_do_rfc1644) {
1461 /* CC.NEW or no option => invalidate cache */
1462 tao.tao_cc = 0;
1463 tcp_hc_updatetao(&inp->inp_inc,
1464 TCP_HC_TAO_CC, to.to_cc, 0);
1465 }
1466 tp->t_state = TCPS_SYN_RECEIVED;
1467 }
1468 }
1469
1470trimthenstep6:
1471 /*
1472 * Advance th->th_seq to correspond to first data byte.
1473 * If data, trim to stay within window,
1474 * dropping FIN if necessary.
1475 */
1476 th->th_seq++;
1477 if (tlen > tp->rcv_wnd) {
1478 todrop = tlen - tp->rcv_wnd;
1479 m_adj(m, -todrop);
1480 tlen = tp->rcv_wnd;
1481 thflags &= ~TH_FIN;
1482 tcpstat.tcps_rcvpackafterwin++;
1483 tcpstat.tcps_rcvbyteafterwin += todrop;
1484 }
1485 tp->snd_wl1 = th->th_seq - 1;
1486 tp->rcv_up = th->th_seq;
1487 /*
1488 * Client side of transaction: already sent SYN and data.
1489 * If the remote host used T/TCP to validate the SYN,
1490 * our data will be ACK'd; if so, enter normal data segment
1491 * processing in the middle of step 5, ack processing.
1492 * Otherwise, goto step 6.
1493 */
1494 if (thflags & TH_ACK)
1495 goto process_ACK;
1496
1497 goto step6;
1498
1499 /*
1500 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1501 * if segment contains a SYN and CC [not CC.NEW] option:
1502 * if state == TIME_WAIT and connection duration > MSL,
1503 * drop packet and send RST;
1504 *
1505 * if SEG.CC > CCrecv then is new SYN, and can implicitly
1506 * ack the FIN (and data) in retransmission queue.
1507 * Complete close and delete TCPCB. Then reprocess
1508 * segment, hoping to find new TCPCB in LISTEN state;
1509 *
1510 * else must be old SYN; drop it.
1511 * else do normal processing.
1512 */
1513 case TCPS_LAST_ACK:
1514 case TCPS_CLOSING:
1515 case TCPS_TIME_WAIT:
1516 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
1517 if ((thflags & TH_SYN) &&
1518 (to.to_flags & TOF_CC) && tp->cc_recv != 0) {
1519 if (tp->t_state == TCPS_TIME_WAIT &&
1520 (ticks - tp->t_starttime) > tcp_msl) {
1521 rstreason = BANDLIM_UNLIMITED;
1522 goto dropwithreset;
1523 }
1524 if (CC_GT(to.to_cc, tp->cc_recv)) {
1525 tp = tcp_close(tp);
1526 goto findpcb;
1527 }
1528 else
1529 goto drop;
1530 }
1531 break; /* continue normal processing */
1532 }
1533
1534 /*
1535 * States other than LISTEN or SYN_SENT.
1536 * First check the RST flag and sequence number since reset segments
1537 * are exempt from the timestamp and connection count tests. This
1538 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1539 * below which allowed reset segments in half the sequence space
 1540 * to fall through and be processed (which gives forged reset
1541 * segments with a random sequence number a 50 percent chance of
1542 * killing a connection).
1543 * Then check timestamp, if present.
1544 * Then check the connection count, if present.
1545 * Then check that at least some bytes of segment are within
1546 * receive window. If segment begins before rcv_nxt,
1547 * drop leading data (and SYN); if nothing left, just ack.
1548 *
1549 *
1550 * If the RST bit is set, check the sequence number to see
1551 * if this is a valid reset segment.
1552 * RFC 793 page 37:
1553 * In all states except SYN-SENT, all reset (RST) segments
1554 * are validated by checking their SEQ-fields. A reset is
1555 * valid if its sequence number is in the window.
1556 * Note: this does not take into account delayed ACKs, so
1557 * we should test against last_ack_sent instead of rcv_nxt.
1558 * The sequence number in the reset segment is normally an
 1559 * echo of our outgoing acknowledgement numbers, but some hosts
1560 * send a reset with the sequence number at the rightmost edge
1561 * of our receive window, and we have to handle this case.
1562 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
1563 * that brute force RST attacks are possible. To combat this,
1564 * we use a much stricter check while in the ESTABLISHED state,
1565 * only accepting RSTs where the sequence number is equal to
1566 * last_ack_sent. In all other states (the states in which a
1567 * RST is more likely), the more permissive check is used.
 1568 * If we have multiple segments in flight, the initial reset
1569 * segment sequence numbers will be to the left of last_ack_sent,
1570 * but they will eventually catch up.
1571 * In any case, it never made sense to trim reset segments to
1572 * fit the receive window since RFC 1122 says:
1573 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
1574 *
1575 * A TCP SHOULD allow a received RST segment to include data.
1576 *
1577 * DISCUSSION
1578 * It has been suggested that a RST segment could contain
1579 * ASCII text that encoded and explained the cause of the
1580 * RST. No standard has yet been established for such
1581 * data.
1582 *
1583 * If the reset segment passes the sequence number test examine
1584 * the state:
1585 * SYN_RECEIVED STATE:
1586 * If passive open, return to LISTEN state.
1587 * If active open, inform user that connection was refused.
1588 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
1589 * Inform user that connection was reset, and close tcb.
1590 * CLOSING, LAST_ACK STATES:
1591 * Close the tcb.
1592 * TIME_WAIT STATE:
1593 * Drop the segment - see Stevens, vol. 2, p. 964 and
1594 * RFC 1337.
1595 */
1596 if (thflags & TH_RST) {
1597 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
1598 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1599 switch (tp->t_state) {
1600
1601 case TCPS_SYN_RECEIVED:
1602 so->so_error = ECONNREFUSED;
1603 goto close;
1604
1605 case TCPS_ESTABLISHED:
1606 if (tp->last_ack_sent != th->th_seq) {
1607 tcpstat.tcps_badrst++;
1608 goto drop;
1609 }
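 /* FALLTHROUGH */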
1610 case TCPS_FIN_WAIT_1:
1611 case TCPS_FIN_WAIT_2:
1612 case TCPS_CLOSE_WAIT:
1613 so->so_error = ECONNRESET;
1614 close:
1615 tp->t_state = TCPS_CLOSED;
1616 tcpstat.tcps_drops++;
1617 tp = tcp_close(tp);
1618 break;
1619
1620 case TCPS_CLOSING:
1621 case TCPS_LAST_ACK:
1622 tp = tcp_close(tp);
1623 break;
1624
1625 case TCPS_TIME_WAIT:
1626 KASSERT(tp->t_state != TCPS_TIME_WAIT,
1627 ("timewait"));
1628 break;
1629 }
1630 }
1631 goto drop;
1632 }
1633
1634 /*
1635 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1636 * and it's less than ts_recent, drop it.
1637 */
1638 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
1639 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
1640
1641 /* Check to see if ts_recent is over 24 days old. */
1642 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1643 /*
1644 * Invalidate ts_recent. If this segment updates
1645 * ts_recent, the age will be reset later and ts_recent
1646 * will get a valid value. If it does not, setting
1647 * ts_recent to zero will at least satisfy the
1648 * requirement that zero be placed in the timestamp
1649 * echo reply when ts_recent isn't valid. The
1650 * age isn't reset until we get a valid ts_recent
1651 * because we don't want out-of-order segments to be
1652 * dropped when ts_recent is old.
1653 */
1654 tp->ts_recent = 0;
1655 } else {
1656 tcpstat.tcps_rcvduppack++;
1657 tcpstat.tcps_rcvdupbyte += tlen;
1658 tcpstat.tcps_pawsdrop++;
1659 if (tlen)
1660 goto dropafterack;
1661 goto drop;
1662 }
1663 }
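 /*
  * The 24 day horizon comes from PAWS in RFC 1323: the fastest
  * allowed timestamp clock ticks once per millisecond, so the
  * 31-bit signed comparison space wraps after roughly 2^31 ms,
  * about 24.8 days; beyond that ts_recent can't be trusted.
  */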
1664
1665 /*
1666 * T/TCP mechanism
1667 * If T/TCP was negotiated and the segment doesn't have CC,
1668 * or if its CC is wrong then drop the segment.
1669 * RST segments do not have to comply with this.
1670 */
1671 if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) &&
1672 ((to.to_flags & TOF_CC) == 0 || tp->cc_recv != to.to_cc))
1673 goto dropafterack;
1674
1675 /*
1676 * In the SYN-RECEIVED state, validate that the packet belongs to
1677 * this connection before trimming the data to fit the receive
1678 * window. Check the sequence number versus IRS since we know
1679 * the sequence numbers haven't wrapped. This is a partial fix
1680 * for the "LAND" DoS attack.
1681 */
1682 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
1683 rstreason = BANDLIM_RST_OPENPORT;
1684 goto dropwithreset;
1685 }
1686
1687 todrop = tp->rcv_nxt - th->th_seq;
1688 if (todrop > 0) {
1689 if (thflags & TH_SYN) {
1690 thflags &= ~TH_SYN;
1691 th->th_seq++;
1692 if (th->th_urp > 1)
1693 th->th_urp--;
1694 else
1695 thflags &= ~TH_URG;
1696 todrop--;
1697 }
1698 /*
1699 * Following if statement from Stevens, vol. 2, p. 960.
1700 */
1701 if (todrop > tlen
1702 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
1703 /*
1704 * Any valid FIN must be to the left of the window.
1705 * At this point the FIN must be a duplicate or out
1706 * of sequence; drop it.
1707 */
1708 thflags &= ~TH_FIN;
1709
1710 /*
1711 * Send an ACK to resynchronize and drop any data.
1712 * But keep on processing for RST or ACK.
1713 */
1714 tp->t_flags |= TF_ACKNOW;
1715 todrop = tlen;
1716 tcpstat.tcps_rcvduppack++;
1717 tcpstat.tcps_rcvdupbyte += todrop;
1718 } else {
1719 tcpstat.tcps_rcvpartduppack++;
1720 tcpstat.tcps_rcvpartdupbyte += todrop;
1721 }
1722 drop_hdrlen += todrop; /* drop from the top afterwards */
1723 th->th_seq += todrop;
1724 tlen -= todrop;
1725 if (th->th_urp > todrop)
1726 th->th_urp -= todrop;
1727 else {
1728 thflags &= ~TH_URG;
1729 th->th_urp = 0;
1730 }
1731 }
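 /*
  * Example: with rcv_nxt = 1000, a segment with th_seq = 900
  * and tlen = 300 has todrop = 100; the first 100 duplicate
  * bytes are cut from the front, th_seq becomes 1000 and tlen
  * 200, and only the new data is processed further.
  */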
1732
1733 /*
1734 * If new data are received on a connection after the
1735 * user processes are gone, then RST the other end.
1736 */
1737 if ((so->so_state & SS_NOFDREF) &&
1738 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
1739 tp = tcp_close(tp);
1740 tcpstat.tcps_rcvafterclose++;
1741 rstreason = BANDLIM_UNLIMITED;
1742 goto dropwithreset;
1743 }
1744
1745 /*
1746 * If segment ends after window, drop trailing data
1747 * (and PUSH and FIN); if nothing left, just ACK.
1748 */
1749 todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd);
1750 if (todrop > 0) {
1751 tcpstat.tcps_rcvpackafterwin++;
1752 if (todrop >= tlen) {
1753 tcpstat.tcps_rcvbyteafterwin += tlen;
1754 /*
1755 * If a new connection request is received
1756 * while in TIME_WAIT, drop the old connection
1757 * and start over if the sequence numbers
1758 * are above the previous ones.
1759 */
1760 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
1761 if (thflags & TH_SYN &&
1762 tp->t_state == TCPS_TIME_WAIT &&
1763 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1764 tp = tcp_close(tp);
1765 goto findpcb;
1766 }
1767 /*
1768 * If window is closed can only take segments at
1769 * window edge, and have to drop data and PUSH from
1770 * incoming segments. Continue processing, but
1771 * remember to ack. Otherwise, drop segment
1772 * and ack.
1773 */
1774 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1775 tp->t_flags |= TF_ACKNOW;
1776 tcpstat.tcps_rcvwinprobe++;
1777 } else
1778 goto dropafterack;
1779 } else
1780 tcpstat.tcps_rcvbyteafterwin += todrop;
1781 m_adj(m, -todrop);
1782 tlen -= todrop;
1783 thflags &= ~(TH_PUSH|TH_FIN);
1784 }
1785
1786 /*
1787 * If last ACK falls within this segment's sequence numbers,
1788 * record its timestamp.
1789 * NOTE that the test is modified according to the latest
1790 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1791 */
1792 if ((to.to_flags & TOF_TS) != 0 &&
1793 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1794 tp->ts_recent_age = ticks;
1795 tp->ts_recent = to.to_tsval;
1796 }
1797
1798 /*
1799 * If a SYN is in the window, then this is an
1800 * error and we send an RST and drop the connection.
1801 */
1802 if (thflags & TH_SYN) {
1803 tp = tcp_drop(tp, ECONNRESET);
1804 rstreason = BANDLIM_UNLIMITED;
1805 goto drop;
1806 }
1807
1808 /*
1809 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
1810 * flag is on (half-synchronized state), then queue data for
1811 * later processing; else drop segment and return.
1812 */
1813 if ((thflags & TH_ACK) == 0) {
1814 if (tp->t_state == TCPS_SYN_RECEIVED ||
1815 (tp->t_flags & TF_NEEDSYN))
1816 goto step6;
1817 else
1818 goto drop;
1819 }
1820
1821 /*
1822 * Ack processing.
1823 */
1824 switch (tp->t_state) {
1825
1826 /*
1827 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
1828 * ESTABLISHED state and continue processing.
1829 * The ACK was checked above.
1830 */
1831 case TCPS_SYN_RECEIVED:
1832
1833 tcpstat.tcps_connects++;
1834 soisconnected(so);
1835 /* Do window scaling? */
1836 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1837 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1838 tp->snd_scale = tp->requested_s_scale;
1839 tp->rcv_scale = tp->request_r_scale;
1840 }
1841 /*
1842 * Upon successful completion of 3-way handshake,
1843 * update cache.CC, pass any queued data to the user,
1844 * and advance state appropriately.
1845 */
1846 if (tcp_do_rfc1644) {
1847 tao.tao_cc = tp->cc_recv;
1848 tcp_hc_updatetao(&inp->inp_inc, TCP_HC_TAO_CC,
1849 tp->cc_recv, 0);
1850 }
1851 /*
1852 * Make transitions:
1853 * SYN-RECEIVED -> ESTABLISHED
1854 * SYN-RECEIVED* -> FIN-WAIT-1
1855 */
1856 tp->t_starttime = ticks;
1857 if (tp->t_flags & TF_NEEDFIN) {
1858 tp->t_state = TCPS_FIN_WAIT_1;
1859 tp->t_flags &= ~TF_NEEDFIN;
1860 } else {
1861 tp->t_state = TCPS_ESTABLISHED;
1862 callout_reset(tp->tt_keep, tcp_keepidle,
1863 tcp_timer_keep, tp);
1864 }
1865 /*
1866 * If segment contains data or ACK, will call tcp_reass()
1867 * later; if not, do so now to pass queued data to user.
1868 */
1869 if (tlen == 0 && (thflags & TH_FIN) == 0)
1870 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
1871 (struct mbuf *)0);
1872 tp->snd_wl1 = th->th_seq - 1;
1873 /* FALLTHROUGH */
1874
1875 /*
1876 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1877 * ACKs. If the ack is in the range
1878 * tp->snd_una < th->th_ack <= tp->snd_max
1879 * then advance tp->snd_una to th->th_ack and drop
1880 * data from the retransmission queue. If this ACK reflects
 1881 * more up-to-date window information, we update it.
1882 */
1883 case TCPS_ESTABLISHED:
1884 case TCPS_FIN_WAIT_1:
1885 case TCPS_FIN_WAIT_2:
1886 case TCPS_CLOSE_WAIT:
1887 case TCPS_CLOSING:
1888 case TCPS_LAST_ACK:
1889 case TCPS_TIME_WAIT:
1890 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
1891 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1892 if (tlen == 0 && tiwin == tp->snd_wnd) {
1893 tcpstat.tcps_rcvdupack++;
1894 /*
1895 * If we have outstanding data (other than
1896 * a window probe), this is a completely
1897 * duplicate ack (ie, window info didn't
1898 * change), the ack is the biggest we've
1899 * seen and we've seen exactly our rexmt
 1900 * threshold of them, assume a packet
1901 * has been dropped and retransmit it.
1902 * Kludge snd_nxt & the congestion
1903 * window so we send only this one
1904 * packet.
1905 *
1906 * We know we're losing at the current
1907 * window size so do congestion avoidance
1908 * (set ssthresh to half the current window
1909 * and pull our congestion window back to
1910 * the new ssthresh).
1911 *
1912 * Dup acks mean that packets have left the
1913 * network (they're now cached at the receiver)
1914 * so bump cwnd by the amount in the receiver
1915 * to keep a constant cwnd packets in the
1916 * network.
1917 */
1918 if (!callout_active(tp->tt_rexmt) ||
1919 th->th_ack != tp->snd_una)
1920 tp->t_dupacks = 0;
1921 else if (++tp->t_dupacks > tcprexmtthresh ||
1922 ((tcp_do_newreno || tp->sack_enable) &&
1923 IN_FASTRECOVERY(tp))) {
1924 tp->snd_cwnd += tp->t_maxseg;
1925 (void) tcp_output(tp);
1926 goto drop;
1927 } else if (tp->t_dupacks == tcprexmtthresh) {
1928 tcp_seq onxt = tp->snd_nxt;
1929 u_int win;
1930 if ((tcp_do_newreno ||
1931 tp->sack_enable) &&
1932 SEQ_LEQ(th->th_ack,
1933 tp->snd_recover)) {
1934 tp->t_dupacks = 0;
1935 break;
1936 }
1937 win = min(tp->snd_wnd, tp->snd_cwnd) /
1938 2 / tp->t_maxseg;
1939 if (win < 2)
1940 win = 2;
1941 tp->snd_ssthresh = win * tp->t_maxseg;
1942 ENTER_FASTRECOVERY(tp);
1943 tp->snd_recover = tp->snd_max;
1944 callout_stop(tp->tt_rexmt);
1945 tp->t_rtttime = 0;
1946 if (tp->sack_enable) {
1947 tcpstat.tcps_sack_recovery_episode++;
1948 tp->snd_cwnd =
1949 tp->t_maxseg *
1950 tp->t_dupacks;
1951 (void) tcp_output(tp);
1952 tp->snd_cwnd =
1953 tp->snd_ssthresh;
1954 goto drop;
1955 }
1956
1957 tp->snd_nxt = th->th_ack;
1958 tp->snd_cwnd = tp->t_maxseg;
1959 (void) tcp_output(tp);
1960 KASSERT(tp->snd_limited <= 2,
1961 ("tp->snd_limited too big"));
1962 tp->snd_cwnd = tp->snd_ssthresh +
1963 tp->t_maxseg *
1964 (tp->t_dupacks - tp->snd_limited);
1965 if (SEQ_GT(onxt, tp->snd_nxt))
1966 tp->snd_nxt = onxt;
1967 goto drop;
1968 } else if (tcp_do_rfc3042) {
1969 u_long oldcwnd = tp->snd_cwnd;
1970 tcp_seq oldsndmax = tp->snd_max;
1971 u_int sent;
1972
1973 KASSERT(tp->t_dupacks == 1 ||
1974 tp->t_dupacks == 2,
1975 ("dupacks not 1 or 2"));
1976 if (tp->t_dupacks == 1)
1977 tp->snd_limited = 0;
1978 tp->snd_cwnd =
1979 (tp->snd_nxt - tp->snd_una) +
1980 (tp->t_dupacks - tp->snd_limited) *
1981 tp->t_maxseg;
1982 (void) tcp_output(tp);
1983 sent = tp->snd_max - oldsndmax;
1984 if (sent > tp->t_maxseg) {
1985 KASSERT((tp->t_dupacks == 2 &&
1986 tp->snd_limited == 0) ||
1987 (sent == tp->t_maxseg + 1 &&
1988 tp->t_flags & TF_SENTFIN),
1989 ("sent too much"));
1990 tp->snd_limited = 2;
1991 } else if (sent > 0)
1992 ++tp->snd_limited;
1993 tp->snd_cwnd = oldcwnd;
1994 goto drop;
1995 }
1996 } else
1997 tp->t_dupacks = 0;
1998 break;
1999 }
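 /*
  * Example of the threshold math above, assuming the default
  * tcprexmtthresh of 3: with snd_wnd = snd_cwnd = 10 segments
  * of 1460 bytes, the third duplicate ACK sets ssthresh to
  * (14600 / 2 / 1460) * 1460 = 7300 bytes (5 segments),
  * retransmits the lost segment with a one-segment cwnd, then
  * reopens cwnd for the segments the duplicate ACKs say have
  * left the network. The first two duplicate ACKs take the
  * RFC 3042 limited-transmit branch instead, sending at most
  * one new segment each without permanently inflating cwnd.
  */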
2000
2001 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));
2002
2003 /*
2004 * If the congestion window was inflated to account
2005 * for the other side's cached packets, retract it.
2006 */
2007 if (tcp_do_newreno || tp->sack_enable) {
2008 if (IN_FASTRECOVERY(tp)) {
2009 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2010 if (tp->sack_enable)
2011 tcp_sack_partialack(tp, th);
2012 else
2013 tcp_newreno_partial_ack(tp, th);
2014 } else {
2015 /*
2016 * Out of fast recovery.
2017 * Window inflation should have left us
2018 * with approximately snd_ssthresh
2019 * outstanding data.
2020 * But in case we would be inclined to
2021 * send a burst, better to do it via
2022 * the slow start mechanism.
2023 */
2024 if (SEQ_GT(th->th_ack +
2025 tp->snd_ssthresh,
2026 tp->snd_max))
2027 tp->snd_cwnd = tp->snd_max -
2028 th->th_ack +
2029 tp->t_maxseg;
2030 else
2031 tp->snd_cwnd = tp->snd_ssthresh;
2032 }
2033 }
2034 } else {
2035 if (tp->t_dupacks >= tcprexmtthresh &&
2036 tp->snd_cwnd > tp->snd_ssthresh)
2037 tp->snd_cwnd = tp->snd_ssthresh;
2038 }
2039 tp->t_dupacks = 0;
2040 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2041 tcpstat.tcps_rcvacktoomuch++;
2042 goto dropafterack;
2043 }
2044 /*
2045 * If we reach this point, ACK is not a duplicate,
2046 * i.e., it ACKs something we sent.
2047 */
2048 if (tp->t_flags & TF_NEEDSYN) {
2049 /*
2050 * T/TCP: Connection was half-synchronized, and our
2051 * SYN has been ACK'd (so connection is now fully
2052 * synchronized). Go to non-starred state,
2053 * increment snd_una for ACK of SYN, and check if
2054 * we can do window scaling.
2055 */
2056 tp->t_flags &= ~TF_NEEDSYN;
2057 tp->snd_una++;
2058 /* Do window scaling? */
2059 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2060 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2061 tp->snd_scale = tp->requested_s_scale;
2062 tp->rcv_scale = tp->request_r_scale;
2063 }
2064 }
2065
2066process_ACK:
2067 acked = th->th_ack - tp->snd_una;
2068 tcpstat.tcps_rcvackpack++;
2069 tcpstat.tcps_rcvackbyte += acked;
2070
2071 /*
2072 * If we just performed our first retransmit, and the ACK
2073 * arrives within our recovery window, then it was a mistake
2074 * to do the retransmit in the first place. Recover our
2075 * original cwnd and ssthresh, and proceed to transmit where
2076 * we left off.
2077 */
2078 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
2079 ++tcpstat.tcps_sndrexmitbad;
2080 tp->snd_cwnd = tp->snd_cwnd_prev;
2081 tp->snd_ssthresh = tp->snd_ssthresh_prev;
2082 tp->snd_recover = tp->snd_recover_prev;
2083 if (tp->t_flags & TF_WASFRECOVERY)
2084 ENTER_FASTRECOVERY(tp);
2085 tp->snd_nxt = tp->snd_max;
2086 tp->t_badrxtwin = 0; /* XXX probably not required */
2087 }
2088
2089 /*
2090 * If we have a timestamp reply, update smoothed
2091 * round trip time. If no timestamp is present but
2092 * transmit timer is running and timed sequence
2093 * number was acked, update smoothed round trip time.
2094 * Since we now have an rtt measurement, cancel the
2095 * timer backoff (cf., Phil Karn's retransmit alg.).
2096 * Recompute the initial retransmit timer.
2097 *
2098 * Some boxes send broken timestamp replies
2099 * during the SYN+ACK phase, ignore
2100 * timestamps of 0 or we could calculate a
2101 * huge RTT and blow up the retransmit timer.
2102 */
2103 if ((to.to_flags & TOF_TS) != 0 &&
2104 to.to_tsecr) {
2105 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
2106 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2107 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2108 }
2109 tcp_xmit_bandwidth_limit(tp, th->th_ack);
2110
2111 /*
2112 * If all outstanding data is acked, stop retransmit
2113 * timer and remember to restart (more output or persist).
2114 * If there is more data to be acked, restart retransmit
2115 * timer, using current (possibly backed-off) value.
2116 */
2117 if (th->th_ack == tp->snd_max) {
2118 callout_stop(tp->tt_rexmt);
2119 needoutput = 1;
2120 } else if (!callout_active(tp->tt_persist))
2121 callout_reset(tp->tt_rexmt, tp->t_rxtcur,
2122 tcp_timer_rexmt, tp);
2123
2124 /*
2125 * If no data (only SYN) was ACK'd,
2126 * skip rest of ACK processing.
2127 */
2128 if (acked == 0)
2129 goto step6;
2130
2131 /*
2132 * When new data is acked, open the congestion window.
2133 * If the window gives us less than ssthresh packets
2134 * in flight, open exponentially (maxseg per packet).
2135 * Otherwise open linearly: maxseg per window
2136 * (maxseg^2 / cwnd per packet).
2137 */
2138 if ((!tcp_do_newreno && !tp->sack_enable) ||
2139 !IN_FASTRECOVERY(tp)) {
2140 register u_int cw = tp->snd_cwnd;
2141 register u_int incr = tp->t_maxseg;
2142 if (cw > tp->snd_ssthresh)
2143 incr = incr * incr / cw;
2144 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale);
2145 }
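 /*
  * Example of the increment above: in slow start (cwnd <=
  * ssthresh) cwnd grows by a full t_maxseg per ACK. In
  * congestion avoidance with t_maxseg = 1460 and cwnd = 14600,
  * incr = 1460 * 1460 / 14600 = 146 bytes per ACK, i.e.
  * roughly one segment per window's worth of ACKs.
  */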
2146 SOCKBUF_LOCK(&so->so_snd);
2147 if (acked > so->so_snd.sb_cc) {
2148 tp->snd_wnd -= so->so_snd.sb_cc;
2149 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2150 ourfinisacked = 1;
2151 } else {
2152 sbdrop_locked(&so->so_snd, acked);
2153 tp->snd_wnd -= acked;
2154 ourfinisacked = 0;
2155 }
1276 if (DELAY_ACK(tp)) {
1277 tp->t_flags |= TF_DELACK;
1278 } else {
1279 tp->t_flags |= TF_ACKNOW;
1280 tcp_output(tp);
1281 }
1282 goto check_delack;
1283 }
1284 }
1285
1286 /*
1287 * Calculate amount of space in receive window,
1288 * and then do TCP input processing.
1289 * Receive window is amount of space in rcv queue,
1290 * but not less than advertised window.
1291 */
1292 { int win;
1293
1294 win = sbspace(&so->so_rcv);
1295 if (win < 0)
1296 win = 0;
1297 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1298 }
1299
1300 switch (tp->t_state) {
1301
1302 /*
1303 * If the state is SYN_RECEIVED:
1304 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1305 */
1306 case TCPS_SYN_RECEIVED:
1307 if ((thflags & TH_ACK) &&
1308 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1309 SEQ_GT(th->th_ack, tp->snd_max))) {
1310 rstreason = BANDLIM_RST_OPENPORT;
1311 goto dropwithreset;
1312 }
1313 break;
1314
1315 /*
1316 * If the state is SYN_SENT:
1317 * if seg contains an ACK, but not for our SYN, drop the input.
1318 * if seg contains a RST, then drop the connection.
1319 * if seg does not contain SYN, then drop it.
1320 * Otherwise this is an acceptable SYN segment
1321 * initialize tp->rcv_nxt and tp->irs
1322 * if seg contains ack then advance tp->snd_una
1323 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1324 * arrange for segment to be acked (eventually)
1325 * continue processing rest of data/controls, beginning with URG
1326 */
1327 case TCPS_SYN_SENT:
1328 if (tcp_do_rfc1644)
1329 tcp_hc_gettao(&inp->inp_inc, &tao);
1330
1331 if ((thflags & TH_ACK) &&
1332 (SEQ_LEQ(th->th_ack, tp->iss) ||
1333 SEQ_GT(th->th_ack, tp->snd_max))) {
1334 /*
1335 * If we have a cached CCsent for the remote host,
1336 * hence we haven't just crashed and restarted,
1337 * do not send a RST. This may be a retransmission
1338 * from the other side after our earlier ACK was lost.
1339 * Our new SYN, when it arrives, will serve as the
1340 * needed ACK.
1341 */
1342 if (tao.tao_ccsent != 0)
1343 goto drop;
1344 else {
1345 rstreason = BANDLIM_UNLIMITED;
1346 goto dropwithreset;
1347 }
1348 }
1349 if (thflags & TH_RST) {
1350 if (thflags & TH_ACK)
1351 tp = tcp_drop(tp, ECONNREFUSED);
1352 goto drop;
1353 }
1354 if ((thflags & TH_SYN) == 0)
1355 goto drop;
1356 tp->snd_wnd = th->th_win; /* initial send window */
1357 tp->cc_recv = to.to_cc; /* foreign CC */
1358
1359 tp->irs = th->th_seq;
1360 tcp_rcvseqinit(tp);
1361 if (thflags & TH_ACK) {
1362 /*
1363 * Our SYN was acked. If segment contains CC.ECHO
1364 * option, check it to make sure this segment really
1365 * matches our SYN. If not, just drop it as old
1366 * duplicate, but send an RST if we're still playing
1367 * by the old rules. If no CC.ECHO option, make sure
1368 * we don't get fooled into using T/TCP.
1369 */
1370 if (to.to_flags & TOF_CCECHO) {
1371 if (tp->cc_send != to.to_ccecho) {
1372 if (tao.tao_ccsent != 0)
1373 goto drop;
1374 else {
1375 rstreason = BANDLIM_UNLIMITED;
1376 goto dropwithreset;
1377 }
1378 }
1379 } else
1380 tp->t_flags &= ~TF_RCVD_CC;
1381 tcpstat.tcps_connects++;
1382 soisconnected(so);
1383#ifdef MAC
1384 SOCK_LOCK(so);
1385 mac_set_socket_peer_from_mbuf(m, so);
1386 SOCK_UNLOCK(so);
1387#endif
1388 /* Do window scaling on this connection? */
1389 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1390 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1391 tp->snd_scale = tp->requested_s_scale;
1392 tp->rcv_scale = tp->request_r_scale;
1393 }
1394 /* Segment is acceptable, update cache if undefined. */
1395 if (tao.tao_ccsent == 0 && tcp_do_rfc1644)
1396 tcp_hc_updatetao(&inp->inp_inc, TCP_HC_TAO_CCSENT, to.to_ccecho, 0);
1397
1398 tp->rcv_adv += tp->rcv_wnd;
1399 tp->snd_una++; /* SYN is acked */
1400 /*
1401 * If there's data, delay ACK; if there's also a FIN
1402 * ACKNOW will be turned on later.
1403 */
1404 if (DELAY_ACK(tp) && tlen != 0)
1405 callout_reset(tp->tt_delack, tcp_delacktime,
1406 tcp_timer_delack, tp);
1407 else
1408 tp->t_flags |= TF_ACKNOW;
1409 /*
1410 * Received <SYN,ACK> in SYN_SENT[*] state.
1411 * Transitions:
1412 * SYN_SENT --> ESTABLISHED
1413 * SYN_SENT* --> FIN_WAIT_1
1414 */
1415 tp->t_starttime = ticks;
1416 if (tp->t_flags & TF_NEEDFIN) {
1417 tp->t_state = TCPS_FIN_WAIT_1;
1418 tp->t_flags &= ~TF_NEEDFIN;
1419 thflags &= ~TH_SYN;
1420 } else {
1421 tp->t_state = TCPS_ESTABLISHED;
1422 callout_reset(tp->tt_keep, tcp_keepidle,
1423 tcp_timer_keep, tp);
1424 }
1425 } else {
1426 /*
1427 * Received initial SYN in SYN-SENT[*] state =>
1428 * simultaneous open. If segment contains CC option
1429 * and there is a cached CC, apply TAO test.
1430 * If it succeeds, connection is * half-synchronized.
1431 * Otherwise, do 3-way handshake:
1432 * SYN-SENT -> SYN-RECEIVED
1433 * SYN-SENT* -> SYN-RECEIVED*
1434 * If there was no CC option, clear cached CC value.
1435 */
1436 tp->t_flags |= TF_ACKNOW;
1437 callout_stop(tp->tt_rexmt);
1438 if (to.to_flags & TOF_CC) {
1439 if (tao.tao_cc != 0 &&
1440 CC_GT(to.to_cc, tao.tao_cc)) {
1441 /*
1442 * update cache and make transition:
1443 * SYN-SENT -> ESTABLISHED*
1444 * SYN-SENT* -> FIN-WAIT-1*
1445 */
1446 tao.tao_cc = to.to_cc;
1447 tcp_hc_updatetao(&inp->inp_inc,
1448 TCP_HC_TAO_CC, to.to_cc, 0);
1449 tp->t_starttime = ticks;
1450 if (tp->t_flags & TF_NEEDFIN) {
1451 tp->t_state = TCPS_FIN_WAIT_1;
1452 tp->t_flags &= ~TF_NEEDFIN;
1453 } else {
1454 tp->t_state = TCPS_ESTABLISHED;
1455 callout_reset(tp->tt_keep,
1456 tcp_keepidle,
1457 tcp_timer_keep,
1458 tp);
1459 }
1460 tp->t_flags |= TF_NEEDSYN;
1461 } else
1462 tp->t_state = TCPS_SYN_RECEIVED;
1463 } else {
1464 if (tcp_do_rfc1644) {
1465 /* CC.NEW or no option => invalidate cache */
1466 tao.tao_cc = 0;
1467 tcp_hc_updatetao(&inp->inp_inc,
1468 TCP_HC_TAO_CC, to.to_cc, 0);
1469 }
1470 tp->t_state = TCPS_SYN_RECEIVED;
1471 }
1472 }
1473
1474trimthenstep6:
1475 /*
1476 * Advance th->th_seq to correspond to first data byte.
1477 * If data, trim to stay within window,
1478 * dropping FIN if necessary.
1479 */
1480 th->th_seq++;
1481 if (tlen > tp->rcv_wnd) {
1482 todrop = tlen - tp->rcv_wnd;
1483 m_adj(m, -todrop);
1484 tlen = tp->rcv_wnd;
1485 thflags &= ~TH_FIN;
1486 tcpstat.tcps_rcvpackafterwin++;
1487 tcpstat.tcps_rcvbyteafterwin += todrop;
1488 }
1489 tp->snd_wl1 = th->th_seq - 1;
1490 tp->rcv_up = th->th_seq;
1491 /*
1492 * Client side of transaction: already sent SYN and data.
1493 * If the remote host used T/TCP to validate the SYN,
1494 * our data will be ACK'd; if so, enter normal data segment
1495 * processing in the middle of step 5, ack processing.
1496 * Otherwise, goto step 6.
1497 */
1498 if (thflags & TH_ACK)
1499 goto process_ACK;
1500
1501 goto step6;
1502
1503 /*
1504 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
1505 * if segment contains a SYN and CC [not CC.NEW] option:
1506 * if state == TIME_WAIT and connection duration > MSL,
1507 * drop packet and send RST;
1508 *
1509 * if SEG.CC > CCrecv then is new SYN, and can implicitly
1510 * ack the FIN (and data) in retransmission queue.
1511 * Complete close and delete TCPCB. Then reprocess
1512 * segment, hoping to find new TCPCB in LISTEN state;
1513 *
1514 * else must be old SYN; drop it.
1515 * else do normal processing.
1516 */
1517 case TCPS_LAST_ACK:
1518 case TCPS_CLOSING:
1519 case TCPS_TIME_WAIT:
1520 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
1521 if ((thflags & TH_SYN) &&
1522 (to.to_flags & TOF_CC) && tp->cc_recv != 0) {
1523 if (tp->t_state == TCPS_TIME_WAIT &&
1524 (ticks - tp->t_starttime) > tcp_msl) {
1525 rstreason = BANDLIM_UNLIMITED;
1526 goto dropwithreset;
1527 }
1528 if (CC_GT(to.to_cc, tp->cc_recv)) {
1529 tp = tcp_close(tp);
1530 goto findpcb;
1531 }
1532 else
1533 goto drop;
1534 }
1535 break; /* continue normal processing */
1536 }
1537
1538 /*
1539 * States other than LISTEN or SYN_SENT.
1540 * First check the RST flag and sequence number since reset segments
1541 * are exempt from the timestamp and connection count tests. This
1542 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
1543 * below which allowed reset segments in half the sequence space
1544 * to fall though and be processed (which gives forged reset
1545 * segments with a random sequence number a 50 percent chance of
1546 * killing a connection).
1547 * Then check timestamp, if present.
1548 * Then check the connection count, if present.
1549 * Then check that at least some bytes of segment are within
1550 * receive window. If segment begins before rcv_nxt,
1551 * drop leading data (and SYN); if nothing left, just ack.
1552 *
1553 *
1554 * If the RST bit is set, check the sequence number to see
1555 * if this is a valid reset segment.
1556 * RFC 793 page 37:
1557 * In all states except SYN-SENT, all reset (RST) segments
1558 * are validated by checking their SEQ-fields. A reset is
1559 * valid if its sequence number is in the window.
1560 * Note: this does not take into account delayed ACKs, so
1561 * we should test against last_ack_sent instead of rcv_nxt.
1562 * The sequence number in the reset segment is normally an
1563 * echo of our outgoing acknowlegement numbers, but some hosts
1564 * send a reset with the sequence number at the rightmost edge
1565 * of our receive window, and we have to handle this case.
1566 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
1567 * that brute force RST attacks are possible. To combat this,
1568 * we use a much stricter check while in the ESTABLISHED state,
1569 * only accepting RSTs where the sequence number is equal to
1570 * last_ack_sent. In all other states (the states in which a
1571 * RST is more likely), the more permissive check is used.
1572 * If we have multiple segments in flight, the intial reset
1573 * segment sequence numbers will be to the left of last_ack_sent,
1574 * but they will eventually catch up.
1575 * In any case, it never made sense to trim reset segments to
1576 * fit the receive window since RFC 1122 says:
1577 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
1578 *
1579 * A TCP SHOULD allow a received RST segment to include data.
1580 *
1581 * DISCUSSION
1582 * It has been suggested that a RST segment could contain
1583 * ASCII text that encoded and explained the cause of the
1584 * RST. No standard has yet been established for such
1585 * data.
1586 *
1587 * If the reset segment passes the sequence number test examine
1588 * the state:
1589 * SYN_RECEIVED STATE:
1590 * If passive open, return to LISTEN state.
1591 * If active open, inform user that connection was refused.
1592 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
1593 * Inform user that connection was reset, and close tcb.
1594 * CLOSING, LAST_ACK STATES:
1595 * Close the tcb.
1596 * TIME_WAIT STATE:
1597 * Drop the segment - see Stevens, vol. 2, p. 964 and
1598 * RFC 1337.
1599 */
1600 if (thflags & TH_RST) {
1601 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
1602 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1603 switch (tp->t_state) {
1604
1605 case TCPS_SYN_RECEIVED:
1606 so->so_error = ECONNREFUSED;
1607 goto close;
1608
1609 case TCPS_ESTABLISHED:
1610 if (tp->last_ack_sent != th->th_seq) {
1611 tcpstat.tcps_badrst++;
1612 goto drop;
1613 }
1614 case TCPS_FIN_WAIT_1:
1615 case TCPS_FIN_WAIT_2:
1616 case TCPS_CLOSE_WAIT:
1617 so->so_error = ECONNRESET;
1618 close:
1619 tp->t_state = TCPS_CLOSED;
1620 tcpstat.tcps_drops++;
1621 tp = tcp_close(tp);
1622 break;
1623
1624 case TCPS_CLOSING:
1625 case TCPS_LAST_ACK:
1626 tp = tcp_close(tp);
1627 break;
1628
1629 case TCPS_TIME_WAIT:
1630 KASSERT(tp->t_state != TCPS_TIME_WAIT,
1631 ("timewait"));
1632 break;
1633 }
1634 }
1635 goto drop;
1636 }
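	/*
	 * Editor's illustration (not in the original source), using made-up
	 * example values: with last_ack_sent = 1000 and rcv_wnd = 57344, the
	 * window test above accepts any RST with 1000 <= SEG.SEQ < 58344.
	 * In ESTABLISHED, however, only SEG.SEQ == 1000 survives; a forged
	 * RST at, say, SEG.SEQ = 30000 passes the window test but is counted
	 * in tcps_badrst and dropped.
	 */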
1637
1638 /*
1639 * RFC 1323 PAWS: If we have a timestamp reply on this segment
1640 * and it's less than ts_recent, drop it.
1641 */
1642 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
1643 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
1644
1645 /* Check to see if ts_recent is over 24 days old. */
1646 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
1647 /*
1648 * Invalidate ts_recent. If this segment updates
1649 * ts_recent, the age will be reset later and ts_recent
1650 * will get a valid value. If it does not, setting
1651 * ts_recent to zero will at least satisfy the
1652 * requirement that zero be placed in the timestamp
1653 * echo reply when ts_recent isn't valid. The
1654 * age isn't reset until we get a valid ts_recent
1655 * because we don't want out-of-order segments to be
1656 * dropped when ts_recent is old.
1657 */
1658 tp->ts_recent = 0;
1659 } else {
1660 tcpstat.tcps_rcvduppack++;
1661 tcpstat.tcps_rcvdupbyte += tlen;
1662 tcpstat.tcps_pawsdrop++;
1663 if (tlen)
1664 goto dropafterack;
1665 goto drop;
1666 }
1667 }
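	/*
	 * Editor's note (not in the original source): TSTMP_LT() compares
	 * timestamps modulo 2^32, so PAWS survives timestamp wraparound.
	 * Rough example: ts_recent = 4294967000 and to_tsval = 100 give
	 * (int)(100 - 4294967000) == 396 > 0, so the wrapped value is not
	 * "less than" ts_recent and the segment is not dropped.
	 */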
1668
1669 /*
1670 * T/TCP mechanism
1671 * If T/TCP was negotiated and the segment doesn't have CC,
1672 * or if its CC is wrong then drop the segment.
1673 * RST segments do not have to comply with this.
1674 */
1675 if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) &&
1676 ((to.to_flags & TOF_CC) == 0 || tp->cc_recv != to.to_cc))
1677 goto dropafterack;
1678
1679 /*
1680 * In the SYN-RECEIVED state, validate that the packet belongs to
1681 * this connection before trimming the data to fit the receive
1682 * window. Check the sequence number versus IRS since we know
1683 * the sequence numbers haven't wrapped. This is a partial fix
1684 * for the "LAND" DoS attack.
1685 */
1686 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
1687 rstreason = BANDLIM_RST_OPENPORT;
1688 goto dropwithreset;
1689 }
1690
1691 todrop = tp->rcv_nxt - th->th_seq;
1692 if (todrop > 0) {
1693 if (thflags & TH_SYN) {
1694 thflags &= ~TH_SYN;
1695 th->th_seq++;
1696 if (th->th_urp > 1)
1697 th->th_urp--;
1698 else
1699 thflags &= ~TH_URG;
1700 todrop--;
1701 }
1702 /*
1703 * Following if statement from Stevens, vol. 2, p. 960.
1704 */
1705 if (todrop > tlen
1706 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
1707 /*
1708 * Any valid FIN must be to the left of the window.
1709 * At this point the FIN must be a duplicate or out
1710 * of sequence; drop it.
1711 */
1712 thflags &= ~TH_FIN;
1713
1714 /*
1715 * Send an ACK to resynchronize and drop any data.
1716 * But keep on processing for RST or ACK.
1717 */
1718 tp->t_flags |= TF_ACKNOW;
1719 todrop = tlen;
1720 tcpstat.tcps_rcvduppack++;
1721 tcpstat.tcps_rcvdupbyte += todrop;
1722 } else {
1723 tcpstat.tcps_rcvpartduppack++;
1724 tcpstat.tcps_rcvpartdupbyte += todrop;
1725 }
1726 drop_hdrlen += todrop; /* drop from the top afterwards */
1727 th->th_seq += todrop;
1728 tlen -= todrop;
1729 if (th->th_urp > todrop)
1730 th->th_urp -= todrop;
1731 else {
1732 thflags &= ~TH_URG;
1733 th->th_urp = 0;
1734 }
1735 }
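	/*
	 * Editor's worked example (not in the original source): with
	 * rcv_nxt = 1000, an arriving segment with th_seq = 900 and
	 * tlen = 300 gives todrop = 100; the first 100 bytes were already
	 * delivered, so th_seq becomes 1000 and tlen becomes 200.  Had
	 * tlen been <= 100 (a complete duplicate), all the data would be
	 * dropped and an immediate ACK scheduled, while RST/ACK processing
	 * continues.
	 */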
1736
1737 /*
1738 * If new data are received on a connection after the
1739 * user processes are gone, then RST the other end.
1740 */
1741 if ((so->so_state & SS_NOFDREF) &&
1742 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
1743 tp = tcp_close(tp);
1744 tcpstat.tcps_rcvafterclose++;
1745 rstreason = BANDLIM_UNLIMITED;
1746 goto dropwithreset;
1747 }
1748
1749 /*
1750 * If segment ends after window, drop trailing data
1751 * (and PUSH and FIN); if nothing left, just ACK.
1752 */
1753 todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd);
1754 if (todrop > 0) {
1755 tcpstat.tcps_rcvpackafterwin++;
1756 if (todrop >= tlen) {
1757 tcpstat.tcps_rcvbyteafterwin += tlen;
1758 /*
1759 * If a new connection request is received
1760 * while in TIME_WAIT, drop the old connection
1761 * and start over if the sequence numbers
1762 * are above the previous ones.
1763 */
1764 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
1765 if (thflags & TH_SYN &&
1766 tp->t_state == TCPS_TIME_WAIT &&
1767 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1768 tp = tcp_close(tp);
1769 goto findpcb;
1770 }
1771 /*
1772 * If window is closed can only take segments at
1773 * window edge, and have to drop data and PUSH from
1774 * incoming segments. Continue processing, but
1775 * remember to ack. Otherwise, drop segment
1776 * and ack.
1777 */
1778 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1779 tp->t_flags |= TF_ACKNOW;
1780 tcpstat.tcps_rcvwinprobe++;
1781 } else
1782 goto dropafterack;
1783 } else
1784 tcpstat.tcps_rcvbyteafterwin += todrop;
1785 m_adj(m, -todrop);
1786 tlen -= todrop;
1787 thflags &= ~(TH_PUSH|TH_FIN);
1788 }
1789
1790 /*
1791 * If last ACK falls within this segment's sequence numbers,
1792 * record its timestamp.
1793 * NOTE that the test is modified according to the latest
1794 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1795 */
1796 if ((to.to_flags & TOF_TS) != 0 &&
1797 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1798 tp->ts_recent_age = ticks;
1799 tp->ts_recent = to.to_tsval;
1800 }
1801
1802 /*
1803 * If a SYN is in the window, then this is an
1804 * error and we send an RST and drop the connection.
1805 */
1806 if (thflags & TH_SYN) {
1807 tp = tcp_drop(tp, ECONNRESET);
1808 rstreason = BANDLIM_UNLIMITED;
1809 goto drop;
1810 }
1811
1812 /*
1813	 * If the ACK bit is off: if in SYN-RECEIVED state or the NEEDSYN
1814 * flag is on (half-synchronized state), then queue data for
1815 * later processing; else drop segment and return.
1816 */
1817 if ((thflags & TH_ACK) == 0) {
1818 if (tp->t_state == TCPS_SYN_RECEIVED ||
1819 (tp->t_flags & TF_NEEDSYN))
1820 goto step6;
1821 else
1822 goto drop;
1823 }
1824
1825 /*
1826 * Ack processing.
1827 */
1828 switch (tp->t_state) {
1829
1830 /*
1831 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
1832 * ESTABLISHED state and continue processing.
1833 * The ACK was checked above.
1834 */
1835 case TCPS_SYN_RECEIVED:
1836
1837 tcpstat.tcps_connects++;
1838 soisconnected(so);
1839 /* Do window scaling? */
1840 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1841 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1842 tp->snd_scale = tp->requested_s_scale;
1843 tp->rcv_scale = tp->request_r_scale;
1844 }
1845 /*
1846 * Upon successful completion of 3-way handshake,
1847 * update cache.CC, pass any queued data to the user,
1848 * and advance state appropriately.
1849 */
1850 if (tcp_do_rfc1644) {
1851 tao.tao_cc = tp->cc_recv;
1852 tcp_hc_updatetao(&inp->inp_inc, TCP_HC_TAO_CC,
1853 tp->cc_recv, 0);
1854 }
1855 /*
1856 * Make transitions:
1857 * SYN-RECEIVED -> ESTABLISHED
1858 * SYN-RECEIVED* -> FIN-WAIT-1
1859 */
1860 tp->t_starttime = ticks;
1861 if (tp->t_flags & TF_NEEDFIN) {
1862 tp->t_state = TCPS_FIN_WAIT_1;
1863 tp->t_flags &= ~TF_NEEDFIN;
1864 } else {
1865 tp->t_state = TCPS_ESTABLISHED;
1866 callout_reset(tp->tt_keep, tcp_keepidle,
1867 tcp_timer_keep, tp);
1868 }
1869 /*
1870		 * If segment contains data or FIN, will call tcp_reass()
1871 * later; if not, do so now to pass queued data to user.
1872 */
1873 if (tlen == 0 && (thflags & TH_FIN) == 0)
1874 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
1875 (struct mbuf *)0);
1876 tp->snd_wl1 = th->th_seq - 1;
1877 /* FALLTHROUGH */
1878
1879 /*
1880 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1881 * ACKs. If the ack is in the range
1882 * tp->snd_una < th->th_ack <= tp->snd_max
1883 * then advance tp->snd_una to th->th_ack and drop
1884 * data from the retransmission queue. If this ACK reflects
1885	 * more up-to-date window information, we update our window information.
1886 */
1887 case TCPS_ESTABLISHED:
1888 case TCPS_FIN_WAIT_1:
1889 case TCPS_FIN_WAIT_2:
1890 case TCPS_CLOSE_WAIT:
1891 case TCPS_CLOSING:
1892 case TCPS_LAST_ACK:
1893 case TCPS_TIME_WAIT:
1894 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
1895 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1896 if (tlen == 0 && tiwin == tp->snd_wnd) {
1897 tcpstat.tcps_rcvdupack++;
1898 /*
1899 * If we have outstanding data (other than
1900 * a window probe), this is a completely
1901				 * duplicate ack (i.e., window info didn't
1902				 * change), the ack is the biggest we've
1903				 * seen and we've seen exactly our rexmt
1904				 * threshold of them, assume a packet
1905 * has been dropped and retransmit it.
1906 * Kludge snd_nxt & the congestion
1907 * window so we send only this one
1908 * packet.
1909 *
1910 * We know we're losing at the current
1911 * window size so do congestion avoidance
1912 * (set ssthresh to half the current window
1913 * and pull our congestion window back to
1914 * the new ssthresh).
1915 *
1916 * Dup acks mean that packets have left the
1917 * network (they're now cached at the receiver)
1918 * so bump cwnd by the amount in the receiver
1919 * to keep a constant cwnd packets in the
1920 * network.
1921 */
1922 if (!callout_active(tp->tt_rexmt) ||
1923 th->th_ack != tp->snd_una)
1924 tp->t_dupacks = 0;
1925 else if (++tp->t_dupacks > tcprexmtthresh ||
1926 ((tcp_do_newreno || tp->sack_enable) &&
1927 IN_FASTRECOVERY(tp))) {
1928 tp->snd_cwnd += tp->t_maxseg;
1929 (void) tcp_output(tp);
1930 goto drop;
1931 } else if (tp->t_dupacks == tcprexmtthresh) {
1932 tcp_seq onxt = tp->snd_nxt;
1933 u_int win;
1934 if ((tcp_do_newreno ||
1935 tp->sack_enable) &&
1936 SEQ_LEQ(th->th_ack,
1937 tp->snd_recover)) {
1938 tp->t_dupacks = 0;
1939 break;
1940 }
1941 win = min(tp->snd_wnd, tp->snd_cwnd) /
1942 2 / tp->t_maxseg;
1943 if (win < 2)
1944 win = 2;
1945 tp->snd_ssthresh = win * tp->t_maxseg;
1946 ENTER_FASTRECOVERY(tp);
1947 tp->snd_recover = tp->snd_max;
1948 callout_stop(tp->tt_rexmt);
1949 tp->t_rtttime = 0;
1950 if (tp->sack_enable) {
1951 tcpstat.tcps_sack_recovery_episode++;
1952 tp->snd_cwnd =
1953 tp->t_maxseg *
1954 tp->t_dupacks;
1955 (void) tcp_output(tp);
1956 tp->snd_cwnd =
1957 tp->snd_ssthresh;
1958 goto drop;
1959 }
1960
1961 tp->snd_nxt = th->th_ack;
1962 tp->snd_cwnd = tp->t_maxseg;
1963 (void) tcp_output(tp);
1964 KASSERT(tp->snd_limited <= 2,
1965 ("tp->snd_limited too big"));
1966 tp->snd_cwnd = tp->snd_ssthresh +
1967 tp->t_maxseg *
1968 (tp->t_dupacks - tp->snd_limited);
1969 if (SEQ_GT(onxt, tp->snd_nxt))
1970 tp->snd_nxt = onxt;
1971 goto drop;
1972 } else if (tcp_do_rfc3042) {
1973 u_long oldcwnd = tp->snd_cwnd;
1974 tcp_seq oldsndmax = tp->snd_max;
1975 u_int sent;
1976
1977 KASSERT(tp->t_dupacks == 1 ||
1978 tp->t_dupacks == 2,
1979 ("dupacks not 1 or 2"));
1980 if (tp->t_dupacks == 1)
1981 tp->snd_limited = 0;
1982 tp->snd_cwnd =
1983 (tp->snd_nxt - tp->snd_una) +
1984 (tp->t_dupacks - tp->snd_limited) *
1985 tp->t_maxseg;
1986 (void) tcp_output(tp);
1987 sent = tp->snd_max - oldsndmax;
1988 if (sent > tp->t_maxseg) {
1989 KASSERT((tp->t_dupacks == 2 &&
1990 tp->snd_limited == 0) ||
1991 (sent == tp->t_maxseg + 1 &&
1992 tp->t_flags & TF_SENTFIN),
1993 ("sent too much"));
1994 tp->snd_limited = 2;
1995 } else if (sent > 0)
1996 ++tp->snd_limited;
1997 tp->snd_cwnd = oldcwnd;
1998 goto drop;
1999 }
2000 } else
2001 tp->t_dupacks = 0;
2002 break;
2003 }
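		/*
		 * Editor's worked example (not in the original source): on
		 * the third duplicate ACK with snd_wnd = 65535, snd_cwnd =
		 * 20440 and t_maxseg = 1460, win = min(65535, 20440) / 2 /
		 * 1460 = 7, so snd_ssthresh = 7 * 1460 = 10220 before the
		 * lost segment is retransmitted.  The first one or two
		 * duplicates may instead trigger the RFC 3042 limited
		 * transmit branch above.
		 */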
2004
2005 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), ("th_ack <= snd_una"));
2006
2007 /*
2008 * If the congestion window was inflated to account
2009 * for the other side's cached packets, retract it.
2010 */
2011 if (tcp_do_newreno || tp->sack_enable) {
2012 if (IN_FASTRECOVERY(tp)) {
2013 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2014 if (tp->sack_enable)
2015 tcp_sack_partialack(tp, th);
2016 else
2017 tcp_newreno_partial_ack(tp, th);
2018 } else {
2019 /*
2020 * Out of fast recovery.
2021 * Window inflation should have left us
2022 * with approximately snd_ssthresh
2023 * outstanding data.
2024 * But in case we would be inclined to
2025 * send a burst, better to do it via
2026 * the slow start mechanism.
2027 */
2028 if (SEQ_GT(th->th_ack +
2029 tp->snd_ssthresh,
2030 tp->snd_max))
2031 tp->snd_cwnd = tp->snd_max -
2032 th->th_ack +
2033 tp->t_maxseg;
2034 else
2035 tp->snd_cwnd = tp->snd_ssthresh;
2036 }
2037 }
2038 } else {
2039 if (tp->t_dupacks >= tcprexmtthresh &&
2040 tp->snd_cwnd > tp->snd_ssthresh)
2041 tp->snd_cwnd = tp->snd_ssthresh;
2042 }
2043 tp->t_dupacks = 0;
2044 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2045 tcpstat.tcps_rcvacktoomuch++;
2046 goto dropafterack;
2047 }
2048 /*
2049 * If we reach this point, ACK is not a duplicate,
2050 * i.e., it ACKs something we sent.
2051 */
2052 if (tp->t_flags & TF_NEEDSYN) {
2053 /*
2054 * T/TCP: Connection was half-synchronized, and our
2055 * SYN has been ACK'd (so connection is now fully
2056 * synchronized). Go to non-starred state,
2057 * increment snd_una for ACK of SYN, and check if
2058 * we can do window scaling.
2059 */
2060 tp->t_flags &= ~TF_NEEDSYN;
2061 tp->snd_una++;
2062 /* Do window scaling? */
2063 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2064 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2065 tp->snd_scale = tp->requested_s_scale;
2066 tp->rcv_scale = tp->request_r_scale;
2067 }
2068 }
2069
2070process_ACK:
2071 acked = th->th_ack - tp->snd_una;
2072 tcpstat.tcps_rcvackpack++;
2073 tcpstat.tcps_rcvackbyte += acked;
2074
2075 /*
2076 * If we just performed our first retransmit, and the ACK
2077 * arrives within our recovery window, then it was a mistake
2078 * to do the retransmit in the first place. Recover our
2079 * original cwnd and ssthresh, and proceed to transmit where
2080 * we left off.
2081 */
2082 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
2083 ++tcpstat.tcps_sndrexmitbad;
2084 tp->snd_cwnd = tp->snd_cwnd_prev;
2085 tp->snd_ssthresh = tp->snd_ssthresh_prev;
2086 tp->snd_recover = tp->snd_recover_prev;
2087 if (tp->t_flags & TF_WASFRECOVERY)
2088 ENTER_FASTRECOVERY(tp);
2089 tp->snd_nxt = tp->snd_max;
2090 tp->t_badrxtwin = 0; /* XXX probably not required */
2091 }
2092
2093 /*
2094 * If we have a timestamp reply, update smoothed
2095 * round trip time. If no timestamp is present but
2096 * transmit timer is running and timed sequence
2097 * number was acked, update smoothed round trip time.
2098 * Since we now have an rtt measurement, cancel the
2099 * timer backoff (cf., Phil Karn's retransmit alg.).
2100 * Recompute the initial retransmit timer.
2101 *
2102 * Some boxes send broken timestamp replies
2103 * during the SYN+ACK phase, ignore
2104 * timestamps of 0 or we could calculate a
2105 * huge RTT and blow up the retransmit timer.
2106 */
2107 if ((to.to_flags & TOF_TS) != 0 &&
2108 to.to_tsecr) {
2109 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
2110 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2111 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2112 }
2113 tcp_xmit_bandwidth_limit(tp, th->th_ack);
2114
2115 /*
2116 * If all outstanding data is acked, stop retransmit
2117 * timer and remember to restart (more output or persist).
2118 * If there is more data to be acked, restart retransmit
2119 * timer, using current (possibly backed-off) value.
2120 */
2121 if (th->th_ack == tp->snd_max) {
2122 callout_stop(tp->tt_rexmt);
2123 needoutput = 1;
2124 } else if (!callout_active(tp->tt_persist))
2125 callout_reset(tp->tt_rexmt, tp->t_rxtcur,
2126 tcp_timer_rexmt, tp);
2127
2128 /*
2129 * If no data (only SYN) was ACK'd,
2130 * skip rest of ACK processing.
2131 */
2132 if (acked == 0)
2133 goto step6;
2134
2135 /*
2136 * When new data is acked, open the congestion window.
2137 * If the window gives us less than ssthresh packets
2138 * in flight, open exponentially (maxseg per packet).
2139 * Otherwise open linearly: maxseg per window
2140 * (maxseg^2 / cwnd per packet).
2141 */
2142 if ((!tcp_do_newreno && !tp->sack_enable) ||
2143 !IN_FASTRECOVERY(tp)) {
2144 register u_int cw = tp->snd_cwnd;
2145 register u_int incr = tp->t_maxseg;
2146 if (cw > tp->snd_ssthresh)
2147 incr = incr * incr / cw;
2148 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale);
2149 }
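		/*
		 * Editor's worked example (not in the original source): with
		 * t_maxseg = 1460, an ACK in slow start (cw <= ssthresh)
		 * adds the full 1460 bytes, roughly doubling cwnd per RTT.
		 * In congestion avoidance with cw = 14600, incr = 1460 *
		 * 1460 / 14600 = 146 bytes per ACK, i.e. about one segment
		 * per RTT.
		 */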
2150 SOCKBUF_LOCK(&so->so_snd);
2151 if (acked > so->so_snd.sb_cc) {
2152 tp->snd_wnd -= so->so_snd.sb_cc;
2153 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2154 ourfinisacked = 1;
2155 } else {
2156 sbdrop_locked(&so->so_snd, acked);
2157 tp->snd_wnd -= acked;
2158 ourfinisacked = 0;
2159 }
2160 sowwakeup_locked(so);
2161 /* detect una wraparound */
2162 if ((tcp_do_newreno || tp->sack_enable) &&
2163 !IN_FASTRECOVERY(tp) &&
2164 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2165 SEQ_LEQ(th->th_ack, tp->snd_recover))
2166 tp->snd_recover = th->th_ack - 1;
2167 if ((tcp_do_newreno || tp->sack_enable) &&
2168 IN_FASTRECOVERY(tp) &&
2169 SEQ_GEQ(th->th_ack, tp->snd_recover))
2170 EXIT_FASTRECOVERY(tp);
2171 tp->snd_una = th->th_ack;
2172 if (tp->sack_enable) {
2173 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2174 tp->snd_recover = tp->snd_una;
2175 }
2176 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2177 tp->snd_nxt = tp->snd_una;
2178
2179 switch (tp->t_state) {
2180
2181 /*
2182	 * In FIN_WAIT_1 STATE, in addition to the processing
2183	 * for the ESTABLISHED state, if our FIN is now acknowledged
2184 * then enter FIN_WAIT_2.
2185 */
2186 case TCPS_FIN_WAIT_1:
2187 if (ourfinisacked) {
2188 /*
2189 * If we can't receive any more
2190 * data, then closing user can proceed.
2191 * Starting the timer is contrary to the
2192 * specification, but if we don't get a FIN
2193 * we'll hang forever.
2194 */
2195 /* XXXjl
2196 * we should release the tp also, and use a
2197 * compressed state.
2198 */
2199 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2200 soisdisconnected(so);
2201 callout_reset(tp->tt_2msl, tcp_maxidle,
2202 tcp_timer_2msl, tp);
2203 }
2204 tp->t_state = TCPS_FIN_WAIT_2;
2205 }
2206 break;
2207
2208 /*
2209 * In CLOSING STATE in addition to the processing for
2210 * the ESTABLISHED state if the ACK acknowledges our FIN
2211 * then enter the TIME-WAIT state, otherwise ignore
2212 * the segment.
2213 */
2214 case TCPS_CLOSING:
2215 if (ourfinisacked) {
2216 KASSERT(headlocked, ("headlocked"));
2217 tcp_twstart(tp);
2218 INP_INFO_WUNLOCK(&tcbinfo);
2219 m_freem(m);
2220 return;
2221 }
2222 break;
2223
2224 /*
2225 * In LAST_ACK, we may still be waiting for data to drain
2226 * and/or to be acked, as well as for the ack of our FIN.
2227 * If our FIN is now acknowledged, delete the TCB,
2228 * enter the closed state and return.
2229 */
2230 case TCPS_LAST_ACK:
2231 if (ourfinisacked) {
2232 tp = tcp_close(tp);
2233 goto drop;
2234 }
2235 break;
2236
2237 /*
2238 * In TIME_WAIT state the only thing that should arrive
2239 * is a retransmission of the remote FIN. Acknowledge
2240 * it and restart the finack timer.
2241 */
2242 case TCPS_TIME_WAIT:
2243 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
2244 callout_reset(tp->tt_2msl, 2 * tcp_msl,
2245 tcp_timer_2msl, tp);
2246 goto dropafterack;
2247 }
2248 }
2249
2250step6:
2251 /*
2252 * Update window information.
2253 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2254 */
2255 if ((thflags & TH_ACK) &&
2256 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2257 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2258 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2259 /* keep track of pure window updates */
2260 if (tlen == 0 &&
2261 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2262 tcpstat.tcps_rcvwinupd++;
2263 tp->snd_wnd = tiwin;
2264 tp->snd_wl1 = th->th_seq;
2265 tp->snd_wl2 = th->th_ack;
2266 if (tp->snd_wnd > tp->max_sndwnd)
2267 tp->max_sndwnd = tp->snd_wnd;
2268 needoutput = 1;
2269 }
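	/*
	 * Editor's note (not in the original source): the wl1/wl2 test
	 * above is the RFC 793 freshness check.  E.g. with snd_wl1 = 500,
	 * an old segment carrying th_seq = 400 cannot rewrite snd_wnd,
	 * while a segment with th_seq = 500 may do so only if it also
	 * carries a newer ACK, or the same ACK with a larger advertised
	 * window.
	 */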
2270
2271 /*
2272 * Process segments with URG.
2273 */
2274 if ((thflags & TH_URG) && th->th_urp &&
2275 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2276 /*
2277 * This is a kludge, but if we receive and accept
2278 * random urgent pointers, we'll crash in
2279 * soreceive. It's hard to imagine someone
2280 * actually wanting to send this much urgent data.
2281 */
2282 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2283 th->th_urp = 0; /* XXX */
2284 thflags &= ~TH_URG; /* XXX */
2285 goto dodata; /* XXX */
2286 }
2287 /*
2288 * If this segment advances the known urgent pointer,
2289 * then mark the data stream. This should not happen
2290 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2291 * a FIN has been received from the remote side.
2292 * In these states we ignore the URG.
2293 *
2294 * According to RFC961 (Assigned Protocols),
2295 * the urgent pointer points to the last octet
2296 * of urgent data. We continue, however,
2297 * to consider it to indicate the first octet
2298 * of data past the urgent section as the original
2299 * spec states (in one of two places).
2300 */
2301 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2302 tp->rcv_up = th->th_seq + th->th_urp;
2303 SOCKBUF_LOCK(&so->so_rcv);
2304 so->so_oobmark = so->so_rcv.sb_cc +
2305 (tp->rcv_up - tp->rcv_nxt) - 1;
2306 if (so->so_oobmark == 0)
2307 so->so_rcv.sb_state |= SBS_RCVATMARK;
2308 SOCKBUF_UNLOCK(&so->so_rcv);
2309 sohasoutofband(so);
2310 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2311 }
2312 /*
2313		 * Remove out of band data so it doesn't get presented to the user.
2314 * This can happen independent of advancing the URG pointer,
2315 * but if two URG's are pending at once, some out-of-band
2316 * data may creep in... ick.
2317 */
2318 if (th->th_urp <= (u_long)tlen &&
2319 !(so->so_options & SO_OOBINLINE)) {
2320 /* hdr drop is delayed */
2321 tcp_pulloutofband(so, th, m, drop_hdrlen);
2322 }
2323 } else {
2324 /*
2325 * If no out of band data is expected,
2326 * pull receive urgent pointer along
2327 * with the receive window.
2328 */
2329 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2330 tp->rcv_up = tp->rcv_nxt;
2331 }
2332dodata: /* XXX */
2333 KASSERT(headlocked, ("headlocked"));
2334 /*
2335 * Process the segment text, merging it into the TCP sequencing queue,
2336 * and arranging for acknowledgment of receipt if necessary.
2337 * This process logically involves adjusting tp->rcv_wnd as data
2338 * is presented to the user (this happens in tcp_usrreq.c,
2339 * case PRU_RCVD). If a FIN has already been received on this
2340 * connection then we just ignore the text.
2341 */
2342 if ((tlen || (thflags & TH_FIN)) &&
2343 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2344 m_adj(m, drop_hdrlen); /* delayed header drop */
2345 /*
2346 * Insert segment which includes th into TCP reassembly queue
2347 * with control block tp. Set thflags to whether reassembly now
2348 * includes a segment with FIN. This handles the common case
2349 * inline (segment is the next to be received on an established
2350 * connection, and the queue is empty), avoiding linkage into
2351 * and removal from the queue and repetition of various
2352 * conversions.
2353 * Set DELACK for segments received in order, but ack
2354 * immediately when segments are out of order (so
2355 * fast retransmit can work).
2356 */
2357 if (th->th_seq == tp->rcv_nxt &&
2358 LIST_EMPTY(&tp->t_segq) &&
2359 TCPS_HAVEESTABLISHED(tp->t_state)) {
2360 if (DELAY_ACK(tp))
2361 tp->t_flags |= TF_DELACK;
2362 else
2363 tp->t_flags |= TF_ACKNOW;
2364 tp->rcv_nxt += tlen;
2365 thflags = th->th_flags & TH_FIN;
2366 tcpstat.tcps_rcvpack++;
2367 tcpstat.tcps_rcvbyte += tlen;
2368 ND6_HINT(tp);
2369 /* Unlocked read. */
2370 SOCKBUF_LOCK(&so->so_rcv);
2371 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2372 m_freem(m);
2373 else
2374 sbappendstream_locked(&so->so_rcv, m);
2375 sorwakeup_locked(so);
2371 } else {
2372 thflags = tcp_reass(tp, th, &tlen, m);
2373 tp->t_flags |= TF_ACKNOW;
2374 }
2375 if (tp->sack_enable)
2376 tcp_update_sack_list(tp);
2377 /*
2378 * Note the amount of data that peer has sent into
2379 * our window, in order to estimate the sender's
2380 * buffer size.
2381 */
2382 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2383 } else {
2384 m_freem(m);
2385 thflags &= ~TH_FIN;
2386 }
2387
2388 /*
2389 * If FIN is received ACK the FIN and let the user know
2390 * that the connection is closing.
2391 */
2392 if (thflags & TH_FIN) {
2393 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2394 socantrcvmore(so);
2395 /*
2396 * If connection is half-synchronized
2397 * (ie NEEDSYN flag on) then delay ACK,
2398 * so it may be piggybacked when SYN is sent.
2399 * Otherwise, since we received a FIN then no
2400 * more input can be expected, send ACK now.
2401 */
2402 if (tp->t_flags & TF_NEEDSYN)
2403 tp->t_flags |= TF_DELACK;
2404 else
2405 tp->t_flags |= TF_ACKNOW;
2406 tp->rcv_nxt++;
2407 }
2408 switch (tp->t_state) {
2409
2410 /*
2411 * In SYN_RECEIVED and ESTABLISHED STATES
2412 * enter the CLOSE_WAIT state.
2413 */
2414 case TCPS_SYN_RECEIVED:
2415 tp->t_starttime = ticks;
2416 /*FALLTHROUGH*/
2417 case TCPS_ESTABLISHED:
2418 tp->t_state = TCPS_CLOSE_WAIT;
2419 break;
2420
2421 /*
2422 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2423 * enter the CLOSING state.
2424 */
2425 case TCPS_FIN_WAIT_1:
2426 tp->t_state = TCPS_CLOSING;
2427 break;
2428
2429 /*
2430 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2431 * starting the time-wait timer, turning off the other
2432 * standard timers.
2433 */
2434 case TCPS_FIN_WAIT_2:
2435 KASSERT(headlocked == 1, ("headlocked should be 1"));
2436 tcp_twstart(tp);
2437 INP_INFO_WUNLOCK(&tcbinfo);
2438 return;
2439
2440 /*
2441 * In TIME_WAIT state restart the 2 MSL time_wait timer.
2442 */
2443 case TCPS_TIME_WAIT:
2444 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
2445 callout_reset(tp->tt_2msl, 2 * tcp_msl,
2446 tcp_timer_2msl, tp);
2447 break;
2448 }
2449 }
2450 INP_INFO_WUNLOCK(&tcbinfo);
2451#ifdef TCPDEBUG
2452 if (so->so_options & SO_DEBUG)
2453 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
2454 &tcp_savetcp, 0);
2455#endif
2456
2457 /*
2458 * Return any desired output.
2459 */
2460 if (needoutput || (tp->t_flags & TF_ACKNOW))
2461 (void) tcp_output(tp);
2462
2463check_delack:
2464 if (tp->t_flags & TF_DELACK) {
2465 tp->t_flags &= ~TF_DELACK;
2466 callout_reset(tp->tt_delack, tcp_delacktime,
2467 tcp_timer_delack, tp);
2468 }
2469 INP_UNLOCK(inp);
2470 return;
2471
2472dropafterack:
2473 /*
2474 * Generate an ACK dropping incoming segment if it occupies
2475 * sequence space, where the ACK reflects our state.
2476 *
2477 * We can now skip the test for the RST flag since all
2478 * paths to this code happen after packets containing
2479 * RST have been dropped.
2480 *
2481 * In the SYN-RECEIVED state, don't send an ACK unless the
2482 * segment we received passes the SYN-RECEIVED ACK test.
2483 * If it fails send a RST. This breaks the loop in the
2484 * "LAND" DoS attack, and also prevents an ACK storm
2485 * between two listening ports that have been sent forged
2486 * SYN segments, each with the source address of the other.
2487 */
2488 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2489 (SEQ_GT(tp->snd_una, th->th_ack) ||
2490 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2491 rstreason = BANDLIM_RST_OPENPORT;
2492 goto dropwithreset;
2493 }
2494#ifdef TCPDEBUG
2495 if (so->so_options & SO_DEBUG)
2496 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2497 &tcp_savetcp, 0);
2498#endif
2499 KASSERT(headlocked, ("headlocked should be 1"));
2500 INP_INFO_WUNLOCK(&tcbinfo);
2501 m_freem(m);
2502 tp->t_flags |= TF_ACKNOW;
2503 (void) tcp_output(tp);
2504 INP_UNLOCK(inp);
2505 return;
2506
2507dropwithreset:
2508 /*
2509 * Generate a RST, dropping incoming segment.
2510 * Make ACK acceptable to originator of segment.
2511 * Don't bother to respond if destination was broadcast/multicast.
2512 */
2513 if ((thflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
2514 goto drop;
2515 if (isipv6) {
2516 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2517 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2518 goto drop;
2519 } else {
2520 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2521 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2522 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2523 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2524 goto drop;
2525 }
2526 /* IPv6 anycast check is done at tcp6_input() */
2527
2528 /*
2529 * Perform bandwidth limiting.
2530 */
2531 if (badport_bandlim(rstreason) < 0)
2532 goto drop;
2533
2534#ifdef TCPDEBUG
2535 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2536 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2537 &tcp_savetcp, 0);
2538#endif
2539
2540 if (thflags & TH_ACK)
2541 /* mtod() below is safe as long as hdr dropping is delayed */
2542 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
2543 TH_RST);
2544 else {
2545 if (thflags & TH_SYN)
2546 tlen++;
2547 /* mtod() below is safe as long as hdr dropping is delayed */
2548 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
2549 (tcp_seq)0, TH_RST|TH_ACK);
2550 }
2551
2552 if (tp)
2553 INP_UNLOCK(inp);
2554 if (headlocked)
2555 INP_INFO_WUNLOCK(&tcbinfo);
2556 return;
2557
2558drop:
2559 /*
2560 * Drop space held by incoming segment and return.
2561 */
2562#ifdef TCPDEBUG
2563 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2564 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2565 &tcp_savetcp, 0);
2566#endif
2567 if (tp)
2568 INP_UNLOCK(inp);
2569 m_freem(m);
2570 if (headlocked)
2571 INP_INFO_WUNLOCK(&tcbinfo);
2572 return;
2573}
2574
2575/*
2576 * Parse TCP options and place in tcpopt.
2577 */
2578static void
2579tcp_dooptions(tp, to, cp, cnt, is_syn, th)
2580 struct tcpcb *tp;
2581 struct tcpopt *to;
2582 u_char *cp;
2583 int cnt;
2584 int is_syn;
2585 struct tcphdr *th;
2586{
2587 int opt, optlen;
2588
2589 to->to_flags = 0;
2590 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2591 opt = cp[0];
2592 if (opt == TCPOPT_EOL)
2593 break;
2594 if (opt == TCPOPT_NOP)
2595 optlen = 1;
2596 else {
2597 if (cnt < 2)
2598 break;
2599 optlen = cp[1];
2600 if (optlen < 2 || optlen > cnt)
2601 break;
2602 }
2603 switch (opt) {
2604 case TCPOPT_MAXSEG:
2605 if (optlen != TCPOLEN_MAXSEG)
2606 continue;
2607 if (!is_syn)
2608 continue;
2609 to->to_flags |= TOF_MSS;
2610 bcopy((char *)cp + 2,
2611 (char *)&to->to_mss, sizeof(to->to_mss));
2612 to->to_mss = ntohs(to->to_mss);
2613 break;
2614 case TCPOPT_WINDOW:
2615 if (optlen != TCPOLEN_WINDOW)
2616 continue;
2617 if (! is_syn)
2618 continue;
2619 to->to_flags |= TOF_SCALE;
2620 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
2621 break;
2622 case TCPOPT_TIMESTAMP:
2623 if (optlen != TCPOLEN_TIMESTAMP)
2624 continue;
2625 to->to_flags |= TOF_TS;
2626 bcopy((char *)cp + 2,
2627 (char *)&to->to_tsval, sizeof(to->to_tsval));
2628 to->to_tsval = ntohl(to->to_tsval);
2629 bcopy((char *)cp + 6,
2630 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
2631 to->to_tsecr = ntohl(to->to_tsecr);
2632 break;
2633 case TCPOPT_CC:
2634 if (optlen != TCPOLEN_CC)
2635 continue;
2636 to->to_flags |= TOF_CC;
2637 bcopy((char *)cp + 2,
2638 (char *)&to->to_cc, sizeof(to->to_cc));
2639 to->to_cc = ntohl(to->to_cc);
2640 break;
2641 case TCPOPT_CCNEW:
2642 if (optlen != TCPOLEN_CC)
2643 continue;
2644 if (!is_syn)
2645 continue;
2646 to->to_flags |= TOF_CCNEW;
2647 bcopy((char *)cp + 2,
2648 (char *)&to->to_cc, sizeof(to->to_cc));
2649 to->to_cc = ntohl(to->to_cc);
2650 break;
2651 case TCPOPT_CCECHO:
2652 if (optlen != TCPOLEN_CC)
2653 continue;
2654 if (!is_syn)
2655 continue;
2656 to->to_flags |= TOF_CCECHO;
2657 bcopy((char *)cp + 2,
2658 (char *)&to->to_ccecho, sizeof(to->to_ccecho));
2659 to->to_ccecho = ntohl(to->to_ccecho);
2660 break;
2661#ifdef TCP_SIGNATURE
2662 /*
2663 * XXX In order to reply to a host which has set the
2664 * TCP_SIGNATURE option in its initial SYN, we have to
2665 * record the fact that the option was observed here
2666 * for the syncache code to perform the correct response.
2667 */
2668 case TCPOPT_SIGNATURE:
2669 if (optlen != TCPOLEN_SIGNATURE)
2670 continue;
2671 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
2672 break;
2673#endif
2674 case TCPOPT_SACK_PERMITTED:
2675 if (!tcp_do_sack ||
2676 optlen != TCPOLEN_SACK_PERMITTED)
2677 continue;
2678 if (is_syn) {
2679 /* MUST only be set on SYN */
2680 to->to_flags |= TOF_SACK;
2681 }
2682 break;
2683
2684 case TCPOPT_SACK:
2685 if (!tp || tcp_sack_option(tp, th, cp, optlen))
2686 continue;
2687 break;
2688 default:
2689 continue;
2690 }
2691 }
2692}
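/*
 * Editor's worked example (not in the original source): the loop above
 * walks the classic kind/length option encoding.  The common block
 *	01 01 08 0a <4-byte tsval> <4-byte tsecr>
 * parses as NOP (1), NOP (1), then TCPOPT_TIMESTAMP (kind 8, len 10),
 * setting TOF_TS and filling to_tsval/to_tsecr in host byte order.
 */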
2693
2694/*
2695 * Pull out of band byte out of a segment so
2696 * it doesn't appear in the user's data queue.
2697 * It is still reflected in the segment length for
2698 * sequencing purposes.
2699 */
2700static void
2701tcp_pulloutofband(so, th, m, off)
2702 struct socket *so;
2703 struct tcphdr *th;
2704 register struct mbuf *m;
2705	int off;		/* delayed-to-be-dropped hdrlen */
2706{
2707 int cnt = off + th->th_urp - 1;
2708
2709 while (cnt >= 0) {
2710 if (m->m_len > cnt) {
2711 char *cp = mtod(m, caddr_t) + cnt;
2712 struct tcpcb *tp = sototcpcb(so);
2713
2714 tp->t_iobc = *cp;
2715 tp->t_oobflags |= TCPOOB_HAVEDATA;
2716 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
2717 m->m_len--;
2718 if (m->m_flags & M_PKTHDR)
2719 m->m_pkthdr.len--;
2720 return;
2721 }
2722 cnt -= m->m_len;
2723 m = m->m_next;
2724 if (m == 0)
2725 break;
2726 }
2727 panic("tcp_pulloutofband");
2728}
2729
2730/*
2731 * Collect new round-trip time estimate
2732 * and update averages and current timeout.
2733 */
2734static void
2735tcp_xmit_timer(tp, rtt)
2736 register struct tcpcb *tp;
2737 int rtt;
2738{
2739 register int delta;
2740
2741 tcpstat.tcps_rttupdated++;
2742 tp->t_rttupdated++;
2743 if (tp->t_srtt != 0) {
2744 /*
2745 * srtt is stored as fixed point with 5 bits after the
2746		 * binary point (i.e., scaled by 32). The following magic
2747 * is equivalent to the smoothing algorithm in rfc793 with
2748 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2749 * point). Adjust rtt to origin 0.
2750 */
2751 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2752 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2753
2754 if ((tp->t_srtt += delta) <= 0)
2755 tp->t_srtt = 1;
2756
2757 /*
2758 * We accumulate a smoothed rtt variance (actually, a
2759 * smoothed mean difference), then set the retransmit
2760 * timer to smoothed rtt + 4 times the smoothed variance.
2761 * rttvar is stored as fixed point with 4 bits after the
2762 * binary point (scaled by 16). The following is
2763 * equivalent to rfc793 smoothing with an alpha of .75
2764 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2765 * rfc793's wired-in beta.
2766 */
2767 if (delta < 0)
2768 delta = -delta;
2769 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2770 if ((tp->t_rttvar += delta) <= 0)
2771 tp->t_rttvar = 1;
2772 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2773 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2774 } else {
2775 /*
2776 * No rtt measurement yet - use the unsmoothed rtt.
2777 * Set the variance to half the rtt (so our first
2778 * retransmit happens at 3*rtt).
2779 */
2780 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2781 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2782 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2783 }
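	/*
	 * Editor's worked example (not in the original source), assuming
	 * the usual TCP_RTT_SHIFT = 5 and TCP_DELTA_SHIFT = 2: a smoothed
	 * t_srtt of 320 (10 ticks << 5) and a new rtt sample of 12 ticks
	 * give delta = (11 << 2) - (320 >> 3) = 44 - 40 = 4, so t_srtt
	 * becomes 324, i.e. about 10.125 ticks: a gain of 1/8 toward the
	 * sample.
	 */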
2784 tp->t_rtttime = 0;
2785 tp->t_rxtshift = 0;
2786
2787 /*
2788 * the retransmit should happen at rtt + 4 * rttvar.
2789 * Because of the way we do the smoothing, srtt and rttvar
2790 * will each average +1/2 tick of bias. When we compute
2791 * the retransmit timer, we want 1/2 tick of rounding and
2792 * 1 extra tick because of +-1/2 tick uncertainty in the
2793 * firing of the timer. The bias will give us exactly the
2794 * 1.5 tick we need. But, because the bias is
2795 * statistical, we have to test that we don't drop below
2796 * the minimum feasible timer (which is 2 ticks).
2797 */
2798 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2799 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
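	/*
	 * Editor's note (not in the original source): in unscaled terms
	 * the line above sets t_rxtcur to roughly srtt + 4 * rttvar,
	 * clamped between max(t_rttmin, rtt + 2) ticks and TCPTV_REXMTMAX;
	 * the "+ 2" is the minimum feasible timer discussed above.
	 */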
2800
2801 /*
2802 * We received an ack for a packet that wasn't retransmitted;
2803 * it is probably safe to discard any error indications we've
2804 * received recently. This isn't quite right, but close enough
2805 * for now (a route might have failed after we sent a segment,
2806 * and the return path might not be symmetrical).
2807 */
2808 tp->t_softerror = 0;
2809}
2810
2811/*
2812 * Determine a reasonable value for maxseg size.
2813 * If the route is known, check route for mtu.
2814 * If none, use an mss that can be handled on the outgoing
2815 * interface without forcing IP to fragment; if bigger than
2816 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2817 * to utilize large mbufs. If no route is found, route has no mtu,
2818 * or the destination isn't local, use a default, hopefully conservative
2819 * size (usually 512 or the default IP max size, but no more than the mtu
2820 * of the interface), as we can't discover anything about intervening
2821 * gateways or networks. We also initialize the congestion/slow start
2822 * window to be a single segment if the destination isn't local.
2823 * While looking at the routing entry, we also initialize other path-dependent
2824 * parameters from pre-set or cached values in the routing entry.
2825 *
2826 * Also take into account the space needed for options that we
2827 * send regularly. Make maxseg shorter by that amount to assure
2828 * that we can send maxseg amount of data even when the options
2829 * are present. Store the upper limit of the length of options plus
2830 * data in maxopd.
2831 *
2832 *
2833 * In case of T/TCP, we call this routine during implicit connection
2834 * setup as well (offer = -1), to initialize maxseg from the cached
2835 * MSS of our peer.
2836 *
2837 * NOTE that this routine is only called when we process an incoming
2838 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
2839 */
2840void
2841tcp_mss(tp, offer)
2842 struct tcpcb *tp;
2843 int offer;
2844{
2845 int rtt, mss;
2846 u_long bufsize;
2847 u_long maxmtu;
2848 struct inpcb *inp = tp->t_inpcb;
2849 struct socket *so;
2850 struct hc_metrics_lite metrics;
2851 struct rmxp_tao tao;
2852 int origoffer = offer;
2853#ifdef INET6
2854 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
2855 size_t min_protoh = isipv6 ?
2856 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
2857 sizeof (struct tcpiphdr);
2858#else
2859 const size_t min_protoh = sizeof(struct tcpiphdr);
2860#endif
2861 bzero(&tao, sizeof(tao));
2862
2863 /* initialize */
2864#ifdef INET6
2865 if (isipv6) {
2866 maxmtu = tcp_maxmtu6(&inp->inp_inc);
2867 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
2868 } else
2869#endif
2870 {
2871 maxmtu = tcp_maxmtu(&inp->inp_inc);
2872 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
2873 }
2874 so = inp->inp_socket;
2875
2876 /*
2877 * no route to sender, stay with default mss and return
2878 */
2879 if (maxmtu == 0)
2880 return;
2881
2882 /* what have we got? */
2883 switch (offer) {
2884 case 0:
2885 /*
2886 * Offer == 0 means that there was no MSS on the SYN
2887 * segment, in this case we use tcp_mssdflt.
2888 */
2889 offer =
2890#ifdef INET6
2891 isipv6 ? tcp_v6mssdflt :
2892#endif
2893 tcp_mssdflt;
2894 break;
2895
2896 case -1:
2897 /*
2898		 * Offer == -1 means that we didn't receive a SYN yet;
2899		 * use the cached value in that case.
2900 */
2901 if (tcp_do_rfc1644)
2902 tcp_hc_gettao(&inp->inp_inc, &tao);
2903 if (tao.tao_mssopt != 0)
2904 offer = tao.tao_mssopt;
2905 /* FALLTHROUGH */
2906
2907 default:
2908 /*
2909 * Prevent DoS attack with too small MSS. Round up
2910 * to at least minmss.
2911 */
2912 offer = max(offer, tcp_minmss);
2913 /*
2914 * Sanity check: make sure that maxopd will be large
2915		 * enough to allow some data on segments even if
2916		 * all the option space is used (40 bytes). Otherwise
2917 * funny things may happen in tcp_output.
2918 */
2919 offer = max(offer, 64);
2920 if (tcp_do_rfc1644)
2921 tcp_hc_updatetao(&inp->inp_inc,
2922 TCP_HC_TAO_MSSOPT, 0, offer);
2923 }
2924
2925 /*
2926 * rmx information is now retrieved from tcp_hostcache
2927 */
2928 tcp_hc_get(&inp->inp_inc, &metrics);
2929
2930 /*
2931	 * if there's a discovered mtu in the tcp hostcache, use it;
2932 * else, use the link mtu.
2933 */
2934 if (metrics.rmx_mtu)
2935 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2936 else {
2937#ifdef INET6
2938 if (isipv6) {
2939 mss = maxmtu - min_protoh;
2940 if (!path_mtu_discovery &&
2941 !in6_localaddr(&inp->in6p_faddr))
2942 mss = min(mss, tcp_v6mssdflt);
2943 } else
2944#endif
2945 {
2946 mss = maxmtu - min_protoh;
2947 if (!path_mtu_discovery &&
2948 !in_localaddr(inp->inp_faddr))
2949 mss = min(mss, tcp_mssdflt);
2950 }
2951 }
2952 mss = min(mss, offer);
2953
2954 /*
2955 * maxopd stores the maximum length of data AND options
2956 * in a segment; maxseg is the amount of data in a normal
2957 * segment. We need to store this value (maxopd) apart
2958 * from maxseg, because now every segment carries options
2959 * and thus we normally have somewhat less data in segments.
2960 */
2961 tp->t_maxopd = mss;
2962
2963 /*
2964	 * In case of T/TCP, origoffer == -1 indicates that no segments
2965	 * were received yet. In this case we just guess; otherwise
2966 * we do the same as before T/TCP.
2967 */
2968 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2969 (origoffer == -1 ||
2970 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
2971 mss -= TCPOLEN_TSTAMP_APPA;
2972 if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
2973 (origoffer == -1 ||
2974 (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC))
2975 mss -= TCPOLEN_CC_APPA;
2976 tp->t_maxseg = mss;
2977
2978#if (MCLBYTES & (MCLBYTES - 1)) == 0
2979 if (mss > MCLBYTES)
2980 mss &= ~(MCLBYTES-1);
2981#else
2982 if (mss > MCLBYTES)
2983 mss = mss / MCLBYTES * MCLBYTES;
2984#endif
2985 tp->t_maxseg = mss;
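	/*
	 * Editor's worked example (not in the original source): with the
	 * common MCLBYTES of 2048, an mss of 2800 rounds down to 2048 so
	 * that a full segment fits in a single mbuf cluster; an mss of
	 * 1460 is below MCLBYTES and is left unchanged.
	 */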
2986
2987 /*
2988	 * If there's a pipesize, change the socket buffer to that size;
2989	 * don't change it if sb_hiwat differs from the default (then it
2990 * has been changed on purpose with setsockopt).
2991 * Make the socket buffers an integral number of mss units;
2992 * if the mss is larger than the socket buffer, decrease the mss.
2993 */
2994 SOCKBUF_LOCK(&so->so_snd);
2995 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
2996 bufsize = metrics.rmx_sendpipe;
2997 else
2998 bufsize = so->so_snd.sb_hiwat;
2999 if (bufsize < mss)
3000 mss = bufsize;
3001 else {
3002 bufsize = roundup(bufsize, mss);
3003 if (bufsize > sb_max)
3004 bufsize = sb_max;
3005 if (bufsize > so->so_snd.sb_hiwat)
3006 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3007 }
3008 SOCKBUF_UNLOCK(&so->so_snd);
3009 tp->t_maxseg = mss;
3010
3011 SOCKBUF_LOCK(&so->so_rcv);
3012 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
3013 bufsize = metrics.rmx_recvpipe;
3014 else
3015 bufsize = so->so_rcv.sb_hiwat;
3016 if (bufsize > mss) {
3017 bufsize = roundup(bufsize, mss);
3018 if (bufsize > sb_max)
3019 bufsize = sb_max;
3020 if (bufsize > so->so_rcv.sb_hiwat)
3021 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3022 }
3023 SOCKBUF_UNLOCK(&so->so_rcv);
3024 /*
3025 * While we're here, check the others too
3026 */
3027 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
3028 tp->t_srtt = rtt;
3029 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3030 tcpstat.tcps_usedrtt++;
3031 if (metrics.rmx_rttvar) {
3032 tp->t_rttvar = metrics.rmx_rttvar;
3033 tcpstat.tcps_usedrttvar++;
3034 } else {
3035 /* default variation is +- 1 rtt */
3036 tp->t_rttvar =
3037 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3038 }
3039 TCPT_RANGESET(tp->t_rxtcur,
3040 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3041 tp->t_rttmin, TCPTV_REXMTMAX);
3042 }
3043 if (metrics.rmx_ssthresh) {
3044 /*
3045 * There's some sort of gateway or interface
3046 * buffer limit on the path. Use this to set
3047		 * the slow start threshold, but set the
3048 * threshold to no less than 2*mss.
3049 */
3050 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
3051 tcpstat.tcps_usedssthresh++;
3052 }
3053 if (metrics.rmx_bandwidth)
3054 tp->snd_bandwidth = metrics.rmx_bandwidth;
3055
3056 /*
3057 * Set the slow-start flight size depending on whether this
3058 * is a local network or not.
3059 *
3060 * Extend this so we cache the cwnd too and retrieve it here.
3061 * Make cwnd even bigger than RFC3390 suggests but only if we
3062 * have previous experience with the remote host. Be careful
3063	 * not to make cwnd bigger than the remote receive window or our
3064	 * own send socket buffer. Maybe put some additional upper bound
3065	 * on the retrieved cwnd. Should do incremental updates to
3066	 * hostcache when cwnd collapses so the next connection doesn't
3067	 * overload the path again.
3068	 *
3069	 * RFC3390 says only do this if the SYN or SYN/ACK didn't get lost.
3070 * We currently check only in syncache_socket for that.
3071 */
3072#define TCP_METRICS_CWND
3073#ifdef TCP_METRICS_CWND
3074 if (metrics.rmx_cwnd)
3075 tp->snd_cwnd = max(mss,
3076 min(metrics.rmx_cwnd / 2,
3077 min(tp->snd_wnd, so->so_snd.sb_hiwat)));
3078 else
3079#endif
3080 if (tcp_do_rfc3390)
3081 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
3082#ifdef INET6
3083 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
3084 (!isipv6 && in_localaddr(inp->inp_faddr)))
3085#else
3086 else if (in_localaddr(inp->inp_faddr))
3087#endif
3088 tp->snd_cwnd = mss * ss_fltsz_local;
3089 else
3090 tp->snd_cwnd = mss * ss_fltsz;
3091}
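/*
 * Editor's worked example (not in the original source): for mss = 1460
 * the RFC 3390 branch at the end of tcp_mss() above yields
 * min(4 * 1460, max(2 * 1460, 4380)) = min(5840, 4380) = 4380 bytes,
 * i.e. an initial window of three full-sized segments.
 */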
3092
3093/*
3094 * Determine the MSS option to send on an outgoing SYN.
3095 */
3096int
3097tcp_mssopt(inc)
3098 struct in_conninfo *inc;
3099{
3100 int mss = 0;
3101 u_long maxmtu = 0;
3102 u_long thcmtu = 0;
3103 size_t min_protoh;
3104#ifdef INET6
3105 int isipv6 = inc->inc_isipv6 ? 1 : 0;
3106#endif
3107
3108 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3109
3110#ifdef INET6
3111 if (isipv6) {
3112 mss = tcp_v6mssdflt;
3113 maxmtu = tcp_maxmtu6(inc);
3114 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3115 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3116 } else
3117#endif
3118 {
3119 mss = tcp_mssdflt;
3120 maxmtu = tcp_maxmtu(inc);
3121 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3122 min_protoh = sizeof(struct tcpiphdr);
3123 }
3124 if (maxmtu && thcmtu)
3125 mss = min(maxmtu, thcmtu) - min_protoh;
3126 else if (maxmtu || thcmtu)
3127 mss = max(maxmtu, thcmtu) - min_protoh;
3128
3129 return (mss);
3130}
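/*
 * Editor's worked example (not in the original source): if the link mtu
 * (maxmtu) is 1500 and the hostcache holds a discovered path mtu (thcmtu)
 * of 1400, the advertised MSS is min(1500, 1400) - 40 = 1360 for IPv4;
 * when only one of the two is known, the larger value is used instead.
 */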
3131
3132
3133/*
3134 * When a partial ack arrives, force the retransmission of the
3135 * next unacknowledged segment. Do not clear tp->t_dupacks.
3136 * By setting snd_nxt to th_ack, this forces the retransmission timer
3137 * to be started again.
3138 */
3139static void
3140tcp_newreno_partial_ack(tp, th)
3141 struct tcpcb *tp;
3142 struct tcphdr *th;
3143{
3144 tcp_seq onxt = tp->snd_nxt;
3145 u_long ocwnd = tp->snd_cwnd;
3146
3147 callout_stop(tp->tt_rexmt);
3148 tp->t_rtttime = 0;
3149 tp->snd_nxt = th->th_ack;
3150 /*
3151 * Set snd_cwnd to one segment beyond acknowledged offset.
3152 * (tp->snd_una has not yet been updated when this function is called.)
3153 */
3154 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
3155 tp->t_flags |= TF_ACKNOW;
3156 (void) tcp_output(tp);
3157 tp->snd_cwnd = ocwnd;
3158 if (SEQ_GT(onxt, tp->snd_nxt))
3159 tp->snd_nxt = onxt;
3160 /*
3161	 * Partial window deflation. Relies on the fact that tp->snd_una
3162	 * has not been updated yet.
3163 */
3164 tp->snd_cwnd -= (th->th_ack - tp->snd_una - tp->t_maxseg);
3165}
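/*
 * Editor's worked example (not in the original source): with snd_una =
 * 1000, t_maxseg = 1460 and a partial ack at th_ack = 3920, cwnd is
 * temporarily set to 1460 + 2920 = 4380 so tcp_output() can emit one new
 * segment past the retransmission, then restored and deflated by
 * 2920 - 1460 = 1460, the amount newly acknowledged beyond one segment.
 */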
3166
3167/*
3168 * Returns 1 if the TIME_WAIT state was killed and we should start over,
3169 * looking for a pcb in the listen state. Returns 0 otherwise.
3170 */
3171static int
3172tcp_timewait(tw, to, th, m, tlen)
3173 struct tcptw *tw;
3174 struct tcpopt *to;
3175 struct tcphdr *th;
3176 struct mbuf *m;
3177 int tlen;
3178{
3179 int thflags;
3180 tcp_seq seq;
3181#ifdef INET6
3182 int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
3183#else
3184 const int isipv6 = 0;
3185#endif
3186
3187 thflags = th->th_flags;
3188
3189 /*
3190 * NOTE: for FIN_WAIT_2 (to be added later),
3191 * must validate sequence number before accepting RST
3192 */
3193
3194 /*
3195 * If the segment contains RST:
3196 * Drop the segment - see Stevens, vol. 2, p. 964 and
3197 * RFC 1337.
3198 */
3199 if (thflags & TH_RST)
3200 goto drop;
3201
3202 /*
3203 * If segment contains a SYN and CC [not CC.NEW] option:
3204 * if connection duration > MSL, drop packet and send RST;
3205 *
3206 * if SEG.CC > CCrecv then is new SYN.
3207 * Complete close and delete TCPCB. Then reprocess
3208 * segment, hoping to find new TCPCB in LISTEN state;
3209 *
3210 * else must be old SYN; drop it.
3211 * else do normal processing.
3212 */
3213 if ((thflags & TH_SYN) && (to->to_flags & TOF_CC) && tw->cc_recv != 0) {
3214 if ((ticks - tw->t_starttime) > tcp_msl)
3215 goto reset;
3216 if (CC_GT(to->to_cc, tw->cc_recv)) {
3217 (void) tcp_twclose(tw, 0);
3218 return (1);
3219 }
3220 goto drop;
3221 }
3222
3223#if 0
3224/* PAWS not needed at the moment */
3225 /*
3226 * RFC 1323 PAWS: If we have a timestamp reply on this segment
3227 * and it's less than ts_recent, drop it.
3228 */
3229 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
3230 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
3231 if ((thflags & TH_ACK) == 0)
3232 goto drop;
3233 goto ack;
3234 }
3235 /*
3236 * ts_recent is never updated because we never accept new segments.
3237 */
3238#endif
3239
3240 /*
3241 * If a new connection request is received
3242 * while in TIME_WAIT, drop the old connection
3243 * and start over if the sequence numbers
3244 * are above the previous ones.
3245 */
3246 if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
3247 (void) tcp_twclose(tw, 0);
3248 return (1);
3249 }
3250
3251 /*
3252 * Drop the the segment if it does not contain an ACK.
3253 */
3254 if ((thflags & TH_ACK) == 0)
3255 goto drop;
3256
3257 /*
3258 * Reset the 2MSL timer if this is a duplicate FIN.
3259 */
3260 if (thflags & TH_FIN) {
3261 seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
3262 if (seq + 1 == tw->rcv_nxt)
3263 tcp_timer_2msl_reset(tw, 2 * tcp_msl);
3264 }
3265
3266 /*
3267 * Acknowledge the segment if it has data or is not a duplicate ACK.
3268 */
3269 if (thflags != TH_ACK || tlen != 0 ||
3270 th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
3271 tcp_twrespond(tw, TH_ACK);
3272 goto drop;
3273
3274reset:
3275 /*
3276 * Generate a RST, dropping incoming segment.
3277 * Make ACK acceptable to originator of segment.
3278 * Don't bother to respond if destination was broadcast/multicast.
3279 */
3280 if (m->m_flags & (M_BCAST|M_MCAST))
3281 goto drop;
3282 if (isipv6) {
3283 struct ip6_hdr *ip6;
3284
3285 /* IPv6 anycast check is done at tcp6_input() */
3286 ip6 = mtod(m, struct ip6_hdr *);
3287 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3288 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3289 goto drop;
3290 } else {
3291 struct ip *ip;
3292
3293 ip = mtod(m, struct ip *);
3294 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3295 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3296 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3297 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3298 goto drop;
3299 }
3300 if (thflags & TH_ACK) {
3301 tcp_respond(NULL,
3302 mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
3303 } else {
3304 seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
3305 tcp_respond(NULL,
3306 mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);
3307 }
3308 INP_UNLOCK(tw->tw_inpcb);
3309 return (0);
3310
3311drop:
3312 INP_UNLOCK(tw->tw_inpcb);
3313 m_freem(m);
3314 return (0);
3315}
2376 } else {
2377 thflags = tcp_reass(tp, th, &tlen, m);
2378 tp->t_flags |= TF_ACKNOW;
2379 }
2380 if (tp->sack_enable)
2381 tcp_update_sack_list(tp);
2382 /*
2383		 * Note the amount of data that the peer has sent into
2384 * our window, in order to estimate the sender's
2385 * buffer size.
2386 */
2387 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2388 } else {
2389 m_freem(m);
2390 thflags &= ~TH_FIN;
2391 }
2392
2393 /*
2394	 * If a FIN is received, ACK the FIN and let the user know
2395 * that the connection is closing.
2396 */
2397 if (thflags & TH_FIN) {
2398 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2399 socantrcvmore(so);
2400 /*
2401			 * If the connection is half-synchronized
2402			 * (i.e., the NEEDSYN flag is on), then delay the ACK
2403			 * so that it may be piggybacked when the SYN is sent.
2404 * Otherwise, since we received a FIN then no
2405 * more input can be expected, send ACK now.
2406 */
2407 if (tp->t_flags & TF_NEEDSYN)
2408 tp->t_flags |= TF_DELACK;
2409 else
2410 tp->t_flags |= TF_ACKNOW;
2411 tp->rcv_nxt++;
2412 }
2413 switch (tp->t_state) {
2414
2415 /*
2416		 * In the SYN_RECEIVED and ESTABLISHED states,
2417 * enter the CLOSE_WAIT state.
2418 */
2419 case TCPS_SYN_RECEIVED:
2420 tp->t_starttime = ticks;
2421 /*FALLTHROUGH*/
2422 case TCPS_ESTABLISHED:
2423 tp->t_state = TCPS_CLOSE_WAIT;
2424 break;
2425
2426 /*
2427		 * If still in the FIN_WAIT_1 state, the FIN has not been
2428		 * ACKed, so enter the CLOSING state.
2429 */
2430 case TCPS_FIN_WAIT_1:
2431 tp->t_state = TCPS_CLOSING;
2432 break;
2433
2434 /*
2435		 * In the FIN_WAIT_2 state, enter the TIME_WAIT state,
2436		 * starting the time-wait timer and turning off the other
2437 * standard timers.
2438 */
2439 case TCPS_FIN_WAIT_2:
2440 KASSERT(headlocked == 1, ("headlocked should be 1"));
2441 tcp_twstart(tp);
2442 INP_INFO_WUNLOCK(&tcbinfo);
2443 return;
2444
2445 /*
2446 * In TIME_WAIT state restart the 2 MSL time_wait timer.
2447 */
2448 case TCPS_TIME_WAIT:
2449 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("timewait"));
2450 callout_reset(tp->tt_2msl, 2 * tcp_msl,
2451 tcp_timer_2msl, tp);
2452 break;
2453 }
2454 }
2455 INP_INFO_WUNLOCK(&tcbinfo);
2456#ifdef TCPDEBUG
2457 if (so->so_options & SO_DEBUG)
2458 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
2459 &tcp_savetcp, 0);
2460#endif
2461
2462 /*
2463 * Return any desired output.
2464 */
2465 if (needoutput || (tp->t_flags & TF_ACKNOW))
2466 (void) tcp_output(tp);
2467
2468check_delack:
2469 if (tp->t_flags & TF_DELACK) {
2470 tp->t_flags &= ~TF_DELACK;
2471 callout_reset(tp->tt_delack, tcp_delacktime,
2472 tcp_timer_delack, tp);
2473 }
2474 INP_UNLOCK(inp);
2475 return;
2476
2477dropafterack:
2478 /*
2479 * Generate an ACK dropping incoming segment if it occupies
2480 * sequence space, where the ACK reflects our state.
2481 *
2482 * We can now skip the test for the RST flag since all
2483 * paths to this code happen after packets containing
2484 * RST have been dropped.
2485 *
2486 * In the SYN-RECEIVED state, don't send an ACK unless the
2487 * segment we received passes the SYN-RECEIVED ACK test.
2488	 * If it fails, send a RST.  This breaks the loop in the
2489 * "LAND" DoS attack, and also prevents an ACK storm
2490 * between two listening ports that have been sent forged
2491 * SYN segments, each with the source address of the other.
2492 */
2493 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
2494 (SEQ_GT(tp->snd_una, th->th_ack) ||
2495 SEQ_GT(th->th_ack, tp->snd_max)) ) {
2496 rstreason = BANDLIM_RST_OPENPORT;
2497 goto dropwithreset;
2498 }
2499#ifdef TCPDEBUG
2500 if (so->so_options & SO_DEBUG)
2501 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2502 &tcp_savetcp, 0);
2503#endif
2504 KASSERT(headlocked, ("headlocked should be 1"));
2505 INP_INFO_WUNLOCK(&tcbinfo);
2506 m_freem(m);
2507 tp->t_flags |= TF_ACKNOW;
2508 (void) tcp_output(tp);
2509 INP_UNLOCK(inp);
2510 return;
2511
2512dropwithreset:
2513 /*
2514 * Generate a RST, dropping incoming segment.
2515 * Make ACK acceptable to originator of segment.
2516 * Don't bother to respond if destination was broadcast/multicast.
2517 */
2518 if ((thflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
2519 goto drop;
2520 if (isipv6) {
2521 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2522 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2523 goto drop;
2524 } else {
2525 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2526 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2527 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2528 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2529 goto drop;
2530 }
2531 /* IPv6 anycast check is done at tcp6_input() */
2532
2533 /*
2534 * Perform bandwidth limiting.
2535 */
2536 if (badport_bandlim(rstreason) < 0)
2537 goto drop;
2538
2539#ifdef TCPDEBUG
2540 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2541 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2542 &tcp_savetcp, 0);
2543#endif
2544
2545 if (thflags & TH_ACK)
2546 /* mtod() below is safe as long as hdr dropping is delayed */
2547 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
2548 TH_RST);
2549 else {
2550 if (thflags & TH_SYN)
2551 tlen++;
2552 /* mtod() below is safe as long as hdr dropping is delayed */
2553 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
2554 (tcp_seq)0, TH_RST|TH_ACK);
2555 }
2556
2557 if (tp)
2558 INP_UNLOCK(inp);
2559 if (headlocked)
2560 INP_INFO_WUNLOCK(&tcbinfo);
2561 return;
2562
2563drop:
2564 /*
2565 * Drop space held by incoming segment and return.
2566 */
2567#ifdef TCPDEBUG
2568 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
2569 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
2570 &tcp_savetcp, 0);
2571#endif
2572 if (tp)
2573 INP_UNLOCK(inp);
2574 m_freem(m);
2575 if (headlocked)
2576 INP_INFO_WUNLOCK(&tcbinfo);
2577 return;
2578}
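
/*
 * Illustrative sketch (not part of this file): the SYN-RECEIVED ACK
 * test used at dropafterack above, restated as a standalone predicate.
 * The sketch_* name and the local SKETCH_SEQ_GT macro are assumptions
 * for the example; the kernel's own SEQ_GT lives in tcp_seq.h.  An ACK
 * is acceptable only if snd_una <= th_ack <= snd_max under 32-bit
 * modular sequence arithmetic.
 */
#include <stdint.h>

#define SKETCH_SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)

static int
sketch_ack_acceptable(uint32_t snd_una, uint32_t snd_max, uint32_t th_ack)
{
	/* Reject ACKs below snd_una or beyond snd_max; the signed
	 * difference handles sequence-number wraparound. */
	if (SKETCH_SEQ_GT(snd_una, th_ack) || SKETCH_SEQ_GT(th_ack, snd_max))
		return (0);
	return (1);
}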
2579
2580/*
2581 * Parse TCP options and place in tcpopt.
2582 */
2583static void
2584tcp_dooptions(tp, to, cp, cnt, is_syn, th)
2585 struct tcpcb *tp;
2586 struct tcpopt *to;
2587 u_char *cp;
2588 int cnt;
2589 int is_syn;
2590 struct tcphdr *th;
2591{
2592 int opt, optlen;
2593
2594 to->to_flags = 0;
2595 for (; cnt > 0; cnt -= optlen, cp += optlen) {
2596 opt = cp[0];
2597 if (opt == TCPOPT_EOL)
2598 break;
2599 if (opt == TCPOPT_NOP)
2600 optlen = 1;
2601 else {
2602 if (cnt < 2)
2603 break;
2604 optlen = cp[1];
2605 if (optlen < 2 || optlen > cnt)
2606 break;
2607 }
2608 switch (opt) {
2609 case TCPOPT_MAXSEG:
2610 if (optlen != TCPOLEN_MAXSEG)
2611 continue;
2612 if (!is_syn)
2613 continue;
2614 to->to_flags |= TOF_MSS;
2615 bcopy((char *)cp + 2,
2616 (char *)&to->to_mss, sizeof(to->to_mss));
2617 to->to_mss = ntohs(to->to_mss);
2618 break;
2619 case TCPOPT_WINDOW:
2620 if (optlen != TCPOLEN_WINDOW)
2621 continue;
2622 if (! is_syn)
2623 continue;
2624 to->to_flags |= TOF_SCALE;
2625 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
2626 break;
2627 case TCPOPT_TIMESTAMP:
2628 if (optlen != TCPOLEN_TIMESTAMP)
2629 continue;
2630 to->to_flags |= TOF_TS;
2631 bcopy((char *)cp + 2,
2632 (char *)&to->to_tsval, sizeof(to->to_tsval));
2633 to->to_tsval = ntohl(to->to_tsval);
2634 bcopy((char *)cp + 6,
2635 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
2636 to->to_tsecr = ntohl(to->to_tsecr);
2637 break;
2638 case TCPOPT_CC:
2639 if (optlen != TCPOLEN_CC)
2640 continue;
2641 to->to_flags |= TOF_CC;
2642 bcopy((char *)cp + 2,
2643 (char *)&to->to_cc, sizeof(to->to_cc));
2644 to->to_cc = ntohl(to->to_cc);
2645 break;
2646 case TCPOPT_CCNEW:
2647 if (optlen != TCPOLEN_CC)
2648 continue;
2649 if (!is_syn)
2650 continue;
2651 to->to_flags |= TOF_CCNEW;
2652 bcopy((char *)cp + 2,
2653 (char *)&to->to_cc, sizeof(to->to_cc));
2654 to->to_cc = ntohl(to->to_cc);
2655 break;
2656 case TCPOPT_CCECHO:
2657 if (optlen != TCPOLEN_CC)
2658 continue;
2659 if (!is_syn)
2660 continue;
2661 to->to_flags |= TOF_CCECHO;
2662 bcopy((char *)cp + 2,
2663 (char *)&to->to_ccecho, sizeof(to->to_ccecho));
2664 to->to_ccecho = ntohl(to->to_ccecho);
2665 break;
2666#ifdef TCP_SIGNATURE
2667 /*
2668 * XXX In order to reply to a host which has set the
2669 * TCP_SIGNATURE option in its initial SYN, we have to
2670 * record the fact that the option was observed here
2671 * for the syncache code to perform the correct response.
2672 */
2673 case TCPOPT_SIGNATURE:
2674 if (optlen != TCPOLEN_SIGNATURE)
2675 continue;
2676 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
2677 break;
2678#endif
2679 case TCPOPT_SACK_PERMITTED:
2680 if (!tcp_do_sack ||
2681 optlen != TCPOLEN_SACK_PERMITTED)
2682 continue;
2683 if (is_syn) {
2684 /* MUST only be set on SYN */
2685 to->to_flags |= TOF_SACK;
2686 }
2687 break;
2688
2689 case TCPOPT_SACK:
2690 if (!tp || tcp_sack_option(tp, th, cp, optlen))
2691 continue;
2692 break;
2693 default:
2694 continue;
2695 }
2696 }
2697}
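
/*
 * Illustrative sketch (not part of this file): the option-walking
 * discipline tcp_dooptions() follows, distilled onto a flat buffer.
 * EOL terminates the list, NOP is a single byte, and every other
 * option must carry a length byte of at least 2 that does not run
 * past the remaining space.  The sketch_* name is an assumption for
 * the example.
 */
static void
sketch_walk_options(const unsigned char *cp, int cnt)
{
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == 0)		/* TCPOPT_EOL: end of options */
			break;
		if (opt == 1)		/* TCPOPT_NOP: one byte of padding */
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;	/* malformed option: stop parsing */
		}
		/* a real parser dispatches on opt here, as above */
	}
}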
2698
2699/*
2700 * Pull the out-of-band byte out of a segment so
2701 * it doesn't appear in the user's data queue.
2702 * It is still reflected in the segment length for
2703 * sequencing purposes.
2704 */
2705static void
2706tcp_pulloutofband(so, th, m, off)
2707 struct socket *so;
2708 struct tcphdr *th;
2709 register struct mbuf *m;
2710	 int off;		/* delayed to be dropped hdrlen */
2711{
2712 int cnt = off + th->th_urp - 1;
2713
2714 while (cnt >= 0) {
2715 if (m->m_len > cnt) {
2716 char *cp = mtod(m, caddr_t) + cnt;
2717 struct tcpcb *tp = sototcpcb(so);
2718
2719 tp->t_iobc = *cp;
2720 tp->t_oobflags |= TCPOOB_HAVEDATA;
2721 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
2722 m->m_len--;
2723 if (m->m_flags & M_PKTHDR)
2724 m->m_pkthdr.len--;
2725 return;
2726 }
2727 cnt -= m->m_len;
2728 m = m->m_next;
2729 if (m == 0)
2730 break;
2731 }
2732 panic("tcp_pulloutofband");
2733}
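
/*
 * Illustrative sketch (not part of this file): the same out-of-band
 * extraction on a flat buffer instead of an mbuf chain.  The byte at
 * the urgent offset is saved and the tail of the buffer is shifted
 * down by one, so the OOB byte never reaches the user's data queue.
 * The sketch_* name is an assumption for the example.
 */
#include <string.h>

static int
sketch_pull_oob(unsigned char *buf, int len, int oob_off,
    unsigned char *oobc)
{
	if (oob_off < 0 || oob_off >= len)
		return (-1);		/* offset outside the segment */
	*oobc = buf[oob_off];
	memmove(buf + oob_off, buf + oob_off + 1,
	    (size_t)(len - oob_off - 1));
	return (len - 1);		/* new logical length */
}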
2734
2735/*
2736 * Collect new round-trip time estimate
2737 * and update averages and current timeout.
2738 */
2739static void
2740tcp_xmit_timer(tp, rtt)
2741 register struct tcpcb *tp;
2742 int rtt;
2743{
2744 register int delta;
2745
2746 tcpstat.tcps_rttupdated++;
2747 tp->t_rttupdated++;
2748 if (tp->t_srtt != 0) {
2749 /*
2750 * srtt is stored as fixed point with 5 bits after the
2751	 * binary point (i.e., scaled by 32).  The following magic
2752 * is equivalent to the smoothing algorithm in rfc793 with
2753 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2754 * point). Adjust rtt to origin 0.
2755 */
2756 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2757 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2758
2759 if ((tp->t_srtt += delta) <= 0)
2760 tp->t_srtt = 1;
2761
2762 /*
2763 * We accumulate a smoothed rtt variance (actually, a
2764 * smoothed mean difference), then set the retransmit
2765 * timer to smoothed rtt + 4 times the smoothed variance.
2766 * rttvar is stored as fixed point with 4 bits after the
2767 * binary point (scaled by 16). The following is
2768 * equivalent to rfc793 smoothing with an alpha of .75
2769 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2770 * rfc793's wired-in beta.
2771 */
2772 if (delta < 0)
2773 delta = -delta;
2774 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2775 if ((tp->t_rttvar += delta) <= 0)
2776 tp->t_rttvar = 1;
2777 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2778 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2779 } else {
2780 /*
2781 * No rtt measurement yet - use the unsmoothed rtt.
2782 * Set the variance to half the rtt (so our first
2783 * retransmit happens at 3*rtt).
2784 */
2785 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2786 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2787 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2788 }
2789 tp->t_rtttime = 0;
2790 tp->t_rxtshift = 0;
2791
2792 /*
2793	 * The retransmit should happen at rtt + 4 * rttvar.
2794 * Because of the way we do the smoothing, srtt and rttvar
2795 * will each average +1/2 tick of bias. When we compute
2796 * the retransmit timer, we want 1/2 tick of rounding and
2797 * 1 extra tick because of +-1/2 tick uncertainty in the
2798 * firing of the timer. The bias will give us exactly the
2799 * 1.5 tick we need. But, because the bias is
2800 * statistical, we have to test that we don't drop below
2801 * the minimum feasible timer (which is 2 ticks).
2802 */
2803 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
2804 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
2805
2806 /*
2807 * We received an ack for a packet that wasn't retransmitted;
2808 * it is probably safe to discard any error indications we've
2809 * received recently. This isn't quite right, but close enough
2810 * for now (a route might have failed after we sent a segment,
2811 * and the return path might not be symmetrical).
2812 */
2813 tp->t_softerror = 0;
2814}
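
/*
 * Illustrative sketch (not part of this file): the fixed-point
 * smoothing above with the shifts written out, assuming the usual
 * constants TCP_RTT_SHIFT = 5, TCP_RTTVAR_SHIFT = 4 and
 * TCP_DELTA_SHIFT = 2 (srtt scaled by 32, rttvar by 16, delta by 4).
 * The sketch_* name is an assumption for the example.
 */
static void
sketch_xmit_timer(int *srtt, int *rttvar, int rtt)
{
	int delta;

	if (*srtt != 0) {
		/* delta is scaled by 4; srtt = 7/8 srtt + 1/8 rtt */
		delta = ((rtt - 1) << 2) - (*srtt >> 3);
		if ((*srtt += delta) <= 0)
			*srtt = 1;
		/* rttvar = 3/4 rttvar + 1/4 |delta| */
		if (delta < 0)
			delta = -delta;
		delta -= *rttvar >> 2;
		if ((*rttvar += delta) <= 0)
			*rttvar = 1;
	} else {
		/* first sample: variance starts at rtt/2, so the
		 * first retransmit fires near 3 * rtt */
		*srtt = rtt << 5;
		*rttvar = rtt << 3;
	}
}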
2815
2816/*
2817 * Determine a reasonable value for maxseg size.
2818 * If the route is known, check route for mtu.
2819 * If none, use an mss that can be handled on the outgoing
2820 * interface without forcing IP to fragment; if bigger than
2821 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2822 * to utilize large mbufs. If no route is found, route has no mtu,
2823 * or the destination isn't local, use a default, hopefully conservative
2824 * size (usually 512 or the default IP max size, but no more than the mtu
2825 * of the interface), as we can't discover anything about intervening
2826 * gateways or networks. We also initialize the congestion/slow start
2827 * window to be a single segment if the destination isn't local.
2828 * While looking at the routing entry, we also initialize other path-dependent
2829 * parameters from pre-set or cached values in the routing entry.
2830 *
2831 * Also take into account the space needed for options that we
2832 * send regularly. Make maxseg shorter by that amount to assure
2833 * that we can send maxseg amount of data even when the options
2834 * are present. Store the upper limit of the length of options plus
2835	 * data in maxopd.
2836	 *
2838 * In case of T/TCP, we call this routine during implicit connection
2839 * setup as well (offer = -1), to initialize maxseg from the cached
2840 * MSS of our peer.
2841 *
2842 * NOTE that this routine is only called when we process an incoming
2843 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
2844 */
2845void
2846tcp_mss(tp, offer)
2847 struct tcpcb *tp;
2848 int offer;
2849{
2850 int rtt, mss;
2851 u_long bufsize;
2852 u_long maxmtu;
2853 struct inpcb *inp = tp->t_inpcb;
2854 struct socket *so;
2855 struct hc_metrics_lite metrics;
2856 struct rmxp_tao tao;
2857 int origoffer = offer;
2858#ifdef INET6
2859 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
2860 size_t min_protoh = isipv6 ?
2861 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
2862 sizeof (struct tcpiphdr);
2863#else
2864 const size_t min_protoh = sizeof(struct tcpiphdr);
2865#endif
2866 bzero(&tao, sizeof(tao));
2867
2868 /* initialize */
2869#ifdef INET6
2870 if (isipv6) {
2871 maxmtu = tcp_maxmtu6(&inp->inp_inc);
2872 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
2873 } else
2874#endif
2875 {
2876 maxmtu = tcp_maxmtu(&inp->inp_inc);
2877 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
2878 }
2879 so = inp->inp_socket;
2880
2881 /*
2882 * no route to sender, stay with default mss and return
2883 */
2884 if (maxmtu == 0)
2885 return;
2886
2887 /* what have we got? */
2888 switch (offer) {
2889 case 0:
2890 /*
2891 * Offer == 0 means that there was no MSS on the SYN
2892		 * segment; in this case we use tcp_mssdflt.
2893 */
2894 offer =
2895#ifdef INET6
2896 isipv6 ? tcp_v6mssdflt :
2897#endif
2898 tcp_mssdflt;
2899 break;
2900
2901 case -1:
2902 /*
2903		 * Offer == -1 means that we didn't receive a SYN yet;
2904		 * use the cached value in that case.
2905 */
2906 if (tcp_do_rfc1644)
2907 tcp_hc_gettao(&inp->inp_inc, &tao);
2908 if (tao.tao_mssopt != 0)
2909 offer = tao.tao_mssopt;
2910 /* FALLTHROUGH */
2911
2912 default:
2913 /*
2914		 * Prevent DoS attacks with too small an MSS.  Round up
2915 * to at least minmss.
2916 */
2917 offer = max(offer, tcp_minmss);
2918 /*
2919 * Sanity check: make sure that maxopd will be large
2920		 * enough to allow some data on segments even if all
2921		 * the option space is used (40 bytes).  Otherwise
2922 * funny things may happen in tcp_output.
2923 */
2924 offer = max(offer, 64);
2925 if (tcp_do_rfc1644)
2926 tcp_hc_updatetao(&inp->inp_inc,
2927 TCP_HC_TAO_MSSOPT, 0, offer);
2928 }
2929
2930 /*
2931 * rmx information is now retrieved from tcp_hostcache
2932 */
2933 tcp_hc_get(&inp->inp_inc, &metrics);
2934
2935 /*
2936	 * If there's a discovered MTU in the tcp hostcache, use it;
2937	 * otherwise, use the link MTU.
2938 */
2939 if (metrics.rmx_mtu)
2940 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2941 else {
2942#ifdef INET6
2943 if (isipv6) {
2944 mss = maxmtu - min_protoh;
2945 if (!path_mtu_discovery &&
2946 !in6_localaddr(&inp->in6p_faddr))
2947 mss = min(mss, tcp_v6mssdflt);
2948 } else
2949#endif
2950 {
2951 mss = maxmtu - min_protoh;
2952 if (!path_mtu_discovery &&
2953 !in_localaddr(inp->inp_faddr))
2954 mss = min(mss, tcp_mssdflt);
2955 }
2956 }
2957 mss = min(mss, offer);
2958
2959 /*
2960 * maxopd stores the maximum length of data AND options
2961 * in a segment; maxseg is the amount of data in a normal
2962 * segment. We need to store this value (maxopd) apart
2963 * from maxseg, because now every segment carries options
2964 * and thus we normally have somewhat less data in segments.
2965 */
2966 tp->t_maxopd = mss;
2967
2968 /*
2969	 * In the case of T/TCP, origoffer == -1 indicates that no segments
2970	 * were received yet.  In this case we just guess; otherwise
2971 * we do the same as before T/TCP.
2972 */
2973 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2974 (origoffer == -1 ||
2975 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
2976 mss -= TCPOLEN_TSTAMP_APPA;
2977 if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
2978 (origoffer == -1 ||
2979 (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC))
2980 mss -= TCPOLEN_CC_APPA;
2981 tp->t_maxseg = mss;
2982
2983#if (MCLBYTES & (MCLBYTES - 1)) == 0
2984 if (mss > MCLBYTES)
2985 mss &= ~(MCLBYTES-1);
2986#else
2987 if (mss > MCLBYTES)
2988 mss = mss / MCLBYTES * MCLBYTES;
2989#endif
2990 tp->t_maxseg = mss;
2991
2992 /*
2993	 * If there's a pipesize, change the socket buffer to that size;
2994	 * don't change it if sb_hiwat is different from the default (then
2995	 * it has been changed on purpose with setsockopt).
2996 * Make the socket buffers an integral number of mss units;
2997 * if the mss is larger than the socket buffer, decrease the mss.
2998 */
2999 SOCKBUF_LOCK(&so->so_snd);
3000 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
3001 bufsize = metrics.rmx_sendpipe;
3002 else
3003 bufsize = so->so_snd.sb_hiwat;
3004 if (bufsize < mss)
3005 mss = bufsize;
3006 else {
3007 bufsize = roundup(bufsize, mss);
3008 if (bufsize > sb_max)
3009 bufsize = sb_max;
3010 if (bufsize > so->so_snd.sb_hiwat)
3011 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3012 }
3013 SOCKBUF_UNLOCK(&so->so_snd);
3014 tp->t_maxseg = mss;
3015
3016 SOCKBUF_LOCK(&so->so_rcv);
3017 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
3018 bufsize = metrics.rmx_recvpipe;
3019 else
3020 bufsize = so->so_rcv.sb_hiwat;
3021 if (bufsize > mss) {
3022 bufsize = roundup(bufsize, mss);
3023 if (bufsize > sb_max)
3024 bufsize = sb_max;
3025 if (bufsize > so->so_rcv.sb_hiwat)
3026 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3027 }
3028 SOCKBUF_UNLOCK(&so->so_rcv);
3029 /*
3030 * While we're here, check the others too
3031 */
3032 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
3033 tp->t_srtt = rtt;
3034 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3035 tcpstat.tcps_usedrtt++;
3036 if (metrics.rmx_rttvar) {
3037 tp->t_rttvar = metrics.rmx_rttvar;
3038 tcpstat.tcps_usedrttvar++;
3039 } else {
3040 /* default variation is +- 1 rtt */
3041 tp->t_rttvar =
3042 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3043 }
3044 TCPT_RANGESET(tp->t_rxtcur,
3045 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3046 tp->t_rttmin, TCPTV_REXMTMAX);
3047 }
3048 if (metrics.rmx_ssthresh) {
3049 /*
3050 * There's some sort of gateway or interface
3051 * buffer limit on the path. Use this to set
3052		 * the slow start threshold, but set the
3053 * threshold to no less than 2*mss.
3054 */
3055 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
3056 tcpstat.tcps_usedssthresh++;
3057 }
3058 if (metrics.rmx_bandwidth)
3059 tp->snd_bandwidth = metrics.rmx_bandwidth;
3060
3061 /*
3062 * Set the slow-start flight size depending on whether this
3063 * is a local network or not.
3064 *
3065 * Extend this so we cache the cwnd too and retrieve it here.
3066 * Make cwnd even bigger than RFC3390 suggests but only if we
3067	 * have previous experience with the remote host.  Be careful
3068	 * not to make cwnd bigger than the remote receive window or our
3069	 * own send socket buffer.  Maybe put some additional upper bound
3070	 * on the retrieved cwnd.  Should do incremental updates to the
3071	 * hostcache when cwnd collapses so the next connection doesn't
3072	 * overload the path again.
3073	 *
3074	 * RFC3390 says to do this only if the SYN or SYN/ACK didn't get lost.
3075 * We currently check only in syncache_socket for that.
3076 */
3077#define TCP_METRICS_CWND
3078#ifdef TCP_METRICS_CWND
3079 if (metrics.rmx_cwnd)
3080 tp->snd_cwnd = max(mss,
3081 min(metrics.rmx_cwnd / 2,
3082 min(tp->snd_wnd, so->so_snd.sb_hiwat)));
3083 else
3084#endif
3085 if (tcp_do_rfc3390)
3086 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
3087#ifdef INET6
3088 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
3089 (!isipv6 && in_localaddr(inp->inp_faddr)))
3090#else
3091 else if (in_localaddr(inp->inp_faddr))
3092#endif
3093 tp->snd_cwnd = mss * ss_fltsz_local;
3094 else
3095 tp->snd_cwnd = mss * ss_fltsz;
3096}
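
/*
 * Illustrative sketch (not part of this file): the RFC 3390 initial
 * window chosen above, min(4 * MSS, max(2 * MSS, 4380)).  For a
 * 1460-byte MSS this yields 4380 bytes (3 segments); for a 536-byte
 * MSS, 2144 bytes (4 segments).  The sketch_* name is an assumption
 * for the example.
 */
static unsigned long
sketch_rfc3390_iw(unsigned long mss)
{
	unsigned long hi = 4 * mss;
	unsigned long lo = (2 * mss > 4380) ? 2 * mss : 4380;

	return (hi < lo ? hi : lo);
}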
3097
3098/*
3099 * Determine the MSS option to send on an outgoing SYN.
3100 */
3101int
3102tcp_mssopt(inc)
3103 struct in_conninfo *inc;
3104{
3105 int mss = 0;
3106 u_long maxmtu = 0;
3107 u_long thcmtu = 0;
3108 size_t min_protoh;
3109#ifdef INET6
3110 int isipv6 = inc->inc_isipv6 ? 1 : 0;
3111#endif
3112
3113 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3114
3115#ifdef INET6
3116 if (isipv6) {
3117 mss = tcp_v6mssdflt;
3118 maxmtu = tcp_maxmtu6(inc);
3119 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3120 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3121 } else
3122#endif
3123 {
3124 mss = tcp_mssdflt;
3125 maxmtu = tcp_maxmtu(inc);
3126 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3127 min_protoh = sizeof(struct tcpiphdr);
3128 }
3129 if (maxmtu && thcmtu)
3130 mss = min(maxmtu, thcmtu) - min_protoh;
3131 else if (maxmtu || thcmtu)
3132 mss = max(maxmtu, thcmtu) - min_protoh;
3133
3134 return (mss);
3135}
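
/*
 * Illustrative sketch (not part of this file): the MTU selection rule
 * above.  When both the interface path MTU and a hostcache MTU are
 * known, trust the smaller; when only one is known, use it; otherwise
 * keep the configured default MSS.  The sketch_* name is an
 * assumption for the example.
 */
static unsigned long
sketch_mss_from_mtus(unsigned long maxmtu, unsigned long thcmtu,
    unsigned long min_protoh, unsigned long dfltmss)
{
	if (maxmtu != 0 && thcmtu != 0)
		return ((maxmtu < thcmtu ? maxmtu : thcmtu) - min_protoh);
	if (maxmtu != 0 || thcmtu != 0)
		return ((maxmtu > thcmtu ? maxmtu : thcmtu) - min_protoh);
	return (dfltmss);
}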
3136
3137
3138/*
3139 * When a partial ack arrives, force the retransmission of the
3140 * next unacknowledged segment.  Do not clear tp->t_dupacks.
3141 * By setting snd_nxt to th_ack, this forces the retransmission timer
3142 * to be started again.
3143 */
3144static void
3145tcp_newreno_partial_ack(tp, th)
3146 struct tcpcb *tp;
3147 struct tcphdr *th;
3148{
3149 tcp_seq onxt = tp->snd_nxt;
3150 u_long ocwnd = tp->snd_cwnd;
3151
3152 callout_stop(tp->tt_rexmt);
3153 tp->t_rtttime = 0;
3154 tp->snd_nxt = th->th_ack;
3155 /*
3156 * Set snd_cwnd to one segment beyond acknowledged offset.
3157 * (tp->snd_una has not yet been updated when this function is called.)
3158 */
3159 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
3160 tp->t_flags |= TF_ACKNOW;
3161 (void) tcp_output(tp);
3162 tp->snd_cwnd = ocwnd;
3163 if (SEQ_GT(onxt, tp->snd_nxt))
3164 tp->snd_nxt = onxt;
3165 /*
3166 * Partial window deflation. Relies on fact that tp->snd_una
3167 * not updated yet.
3168 */
3169 tp->snd_cwnd -= (th->th_ack - tp->snd_una - tp->t_maxseg);
3170}
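
/*
 * Illustrative sketch (not part of this file): the partial window
 * deflation performed above.  With acked = th_ack - snd_una, cwnd is
 * deflated by the amount acked and re-inflated by one segment, which
 * lets one new segment accompany the forced retransmission.  Unlike
 * the code above, the sketch guards against underflow; the sketch_*
 * name is an assumption for the example.
 */
static unsigned long
sketch_partial_ack_cwnd(unsigned long cwnd, unsigned long acked,
    unsigned long maxseg)
{
	unsigned long deflate = (acked > maxseg) ? acked - maxseg : 0;

	return (cwnd > deflate ? cwnd - deflate : maxseg);
}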
3171
3172/*
3173 * Returns 1 if the TIME_WAIT state was killed and we should start over,
3174 * looking for a pcb in the listen state. Returns 0 otherwise.
3175 */
3176static int
3177tcp_timewait(tw, to, th, m, tlen)
3178 struct tcptw *tw;
3179 struct tcpopt *to;
3180 struct tcphdr *th;
3181 struct mbuf *m;
3182 int tlen;
3183{
3184 int thflags;
3185 tcp_seq seq;
3186#ifdef INET6
3187 int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
3188#else
3189 const int isipv6 = 0;
3190#endif
3191
3192 thflags = th->th_flags;
3193
3194 /*
3195 * NOTE: for FIN_WAIT_2 (to be added later),
3196 * must validate sequence number before accepting RST
3197 */
3198
3199 /*
3200 * If the segment contains RST:
3201 * Drop the segment - see Stevens, vol. 2, p. 964 and
3202 * RFC 1337.
3203 */
3204 if (thflags & TH_RST)
3205 goto drop;
3206
3207 /*
3208 * If segment contains a SYN and CC [not CC.NEW] option:
3209 * if connection duration > MSL, drop packet and send RST;
3210 *
3211	 * if SEG.CC > CCrecv then it is a new SYN.
3212 * Complete close and delete TCPCB. Then reprocess
3213 * segment, hoping to find new TCPCB in LISTEN state;
3214 *
3215 * else must be old SYN; drop it.
3216 * else do normal processing.
3217 */
3218 if ((thflags & TH_SYN) && (to->to_flags & TOF_CC) && tw->cc_recv != 0) {
3219 if ((ticks - tw->t_starttime) > tcp_msl)
3220 goto reset;
3221 if (CC_GT(to->to_cc, tw->cc_recv)) {
3222 (void) tcp_twclose(tw, 0);
3223 return (1);
3224 }
3225 goto drop;
3226 }
3227
3228#if 0
3229/* PAWS not needed at the moment */
3230 /*
3231 * RFC 1323 PAWS: If we have a timestamp reply on this segment
3232 * and it's less than ts_recent, drop it.
3233 */
3234 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
3235 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
3236 if ((thflags & TH_ACK) == 0)
3237 goto drop;
3238 goto ack;
3239 }
3240 /*
3241 * ts_recent is never updated because we never accept new segments.
3242 */
3243#endif
3244
3245 /*
3246 * If a new connection request is received
3247 * while in TIME_WAIT, drop the old connection
3248 * and start over if the sequence numbers
3249 * are above the previous ones.
3250 */
3251 if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
3252 (void) tcp_twclose(tw, 0);
3253 return (1);
3254 }
3255
3256 /*
3257	 * Drop the segment if it does not contain an ACK.
3258 */
3259 if ((thflags & TH_ACK) == 0)
3260 goto drop;
3261
3262 /*
3263 * Reset the 2MSL timer if this is a duplicate FIN.
3264 */
3265 if (thflags & TH_FIN) {
3266 seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
3267 if (seq + 1 == tw->rcv_nxt)
3268 tcp_timer_2msl_reset(tw, 2 * tcp_msl);
3269 }
3270
3271 /*
3272 * Acknowledge the segment if it has data or is not a duplicate ACK.
3273 */
3274 if (thflags != TH_ACK || tlen != 0 ||
3275 th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
3276 tcp_twrespond(tw, TH_ACK);
3277 goto drop;
3278
3279reset:
3280 /*
3281 * Generate a RST, dropping incoming segment.
3282 * Make ACK acceptable to originator of segment.
3283 * Don't bother to respond if destination was broadcast/multicast.
3284 */
3285 if (m->m_flags & (M_BCAST|M_MCAST))
3286 goto drop;
3287 if (isipv6) {
3288 struct ip6_hdr *ip6;
3289
3290 /* IPv6 anycast check is done at tcp6_input() */
3291 ip6 = mtod(m, struct ip6_hdr *);
3292 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3293 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3294 goto drop;
3295 } else {
3296 struct ip *ip;
3297
3298 ip = mtod(m, struct ip *);
3299 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3300 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3301 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3302 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3303 goto drop;
3304 }
3305 if (thflags & TH_ACK) {
3306 tcp_respond(NULL,
3307 mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
3308 } else {
3309 seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
3310 tcp_respond(NULL,
3311 mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);
3312 }
3313 INP_UNLOCK(tw->tw_inpcb);
3314 return (0);
3315
3316drop:
3317 INP_UNLOCK(tw->tw_inpcb);
3318 m_freem(m);
3319 return (0);
3320}
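
/*
 * Illustrative sketch (not part of this file): the duplicate-FIN test
 * used above to rearm the 2MSL timer.  A retransmitted FIN ends one
 * sequence number short of rcv_nxt, because rcv_nxt already counts
 * the FIN that moved this connection into TIME_WAIT.  The sketch_*
 * name is an assumption for the example.
 */
#include <stdint.h>

static int
sketch_is_duplicate_fin(uint32_t th_seq, int tlen, int has_syn,
    uint32_t rcv_nxt)
{
	uint32_t end = th_seq + (uint32_t)tlen + (has_syn ? 1U : 0U);

	return (end + 1 == rcv_nxt);
}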