tcp_input.c (r253571 → r254889)
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (c) 2007-2008,2010
5 * Swinburne University of Technology, Melbourne, Australia.
6 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
7 * Copyright (c) 2010 The FreeBSD Foundation
8 * Copyright (c) 2010-2011 Juniper Networks, Inc.
9 * All rights reserved.
10 *
11 * Portions of this software were developed at the Centre for Advanced Internet
12 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
13 * James Healy and David Hayes, made possible in part by a grant from the Cisco
14 * University Research Program Fund at Community Foundation Silicon Valley.
15 *
16 * Portions of this software were developed at the Centre for Advanced
17 * Internet Architectures, Swinburne University of Technology, Melbourne,
18 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
19 *
20 * Portions of this software were developed by Robert N. M. Watson under
21 * contract to Juniper Networks, Inc.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 4. Neither the name of the University nor the names of its contributors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 *
47 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 253571 2013-07-23 14:14:24Z ae $");
51__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 254889 2013-08-25 21:54:41Z markj $");
52
53#include "opt_ipfw.h" /* for ipfw_fwd */
54#include "opt_inet.h"
55#include "opt_inet6.h"
56#include "opt_ipsec.h"
57#include "opt_kdtrace.h"
57#include "opt_tcpdebug.h"
58
59#include <sys/param.h>
60#include <sys/kernel.h>
61#include <sys/hhook.h>
62#include <sys/malloc.h>
63#include <sys/mbuf.h>
64#include <sys/proc.h> /* for proc0 declaration */
65#include <sys/protosw.h>
58#include "opt_tcpdebug.h"
59
60#include <sys/param.h>
61#include <sys/kernel.h>
62#include <sys/hhook.h>
63#include <sys/malloc.h>
64#include <sys/mbuf.h>
65#include <sys/proc.h> /* for proc0 declaration */
66#include <sys/protosw.h>
67#include <sys/sdt.h>
68#include <sys/signalvar.h>
69#include <sys/socket.h>
70#include <sys/socketvar.h>
71#include <sys/sysctl.h>
72#include <sys/syslog.h>
73#include <sys/systm.h>
74
75#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
76
77#include <vm/uma.h>
78
79#include <net/if.h>
80#include <net/route.h>
81#include <net/vnet.h>
82
83#define TCPSTATES /* for logging */
84
85#include <netinet/cc.h>
86#include <netinet/in.h>
87#include <netinet/in_kdtrace.h>
88#include <netinet/in_pcb.h>
89#include <netinet/in_systm.h>
90#include <netinet/in_var.h>
91#include <netinet/ip.h>
92#include <netinet/ip_icmp.h> /* required for icmp_var.h */
93#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
94#include <netinet/ip_var.h>
95#include <netinet/ip_options.h>
96#include <netinet/ip6.h>
97#include <netinet/icmp6.h>
98#include <netinet6/in6_pcb.h>
99#include <netinet6/ip6_var.h>
100#include <netinet6/nd6.h>
101#include <netinet/tcp_fsm.h>
102#include <netinet/tcp_seq.h>
103#include <netinet/tcp_timer.h>
104#include <netinet/tcp_var.h>
105#include <netinet6/tcp6_var.h>
106#include <netinet/tcpip.h>
107#include <netinet/tcp_syncache.h>
108#ifdef TCPDEBUG
109#include <netinet/tcp_debug.h>
110#endif /* TCPDEBUG */
111#ifdef TCP_OFFLOAD
112#include <netinet/tcp_offload.h>
113#endif
114
115#ifdef IPSEC
116#include <netipsec/ipsec.h>
117#include <netipsec/ipsec6.h>
118#endif /*IPSEC*/
119
120#include <machine/in_cksum.h>
121
122#include <security/mac/mac_framework.h>
123
124const int tcprexmtthresh = 3;
125
126int tcp_log_in_vain = 0;
127SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
128 &tcp_log_in_vain, 0,
129 "Log all incoming TCP segments to closed ports");
130
131VNET_DEFINE(int, blackhole) = 0;
132#define V_blackhole VNET(blackhole)
133SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
134 &VNET_NAME(blackhole), 0,
135 "Do not send RST on segments to closed ports");
136
137VNET_DEFINE(int, tcp_delack_enabled) = 1;
138SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
139 &VNET_NAME(tcp_delack_enabled), 0,
140 "Delay ACK to try and piggyback it onto a data packet");
141
142VNET_DEFINE(int, drop_synfin) = 0;
143#define V_drop_synfin VNET(drop_synfin)
144SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
145 &VNET_NAME(drop_synfin), 0,
146 "Drop TCP packets with SYN+FIN set");
147
148VNET_DEFINE(int, tcp_do_rfc3042) = 1;
149#define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042)
150SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
151 &VNET_NAME(tcp_do_rfc3042), 0,
152 "Enable RFC 3042 (Limited Transmit)");
153
154VNET_DEFINE(int, tcp_do_rfc3390) = 1;
155SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
156 &VNET_NAME(tcp_do_rfc3390), 0,
157 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
158
159SYSCTL_NODE(_net_inet_tcp, OID_AUTO, experimental, CTLFLAG_RW, 0,
160 "Experimental TCP extensions");
161
162VNET_DEFINE(int, tcp_do_initcwnd10) = 1;
163SYSCTL_VNET_INT(_net_inet_tcp_experimental, OID_AUTO, initcwnd10, CTLFLAG_RW,
164 &VNET_NAME(tcp_do_initcwnd10), 0,
165 "Enable draft-ietf-tcpm-initcwnd-05 (Increasing initial CWND to 10)");
166
167VNET_DEFINE(int, tcp_do_rfc3465) = 1;
168SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
169 &VNET_NAME(tcp_do_rfc3465), 0,
170 "Enable RFC 3465 (Appropriate Byte Counting)");
171
172VNET_DEFINE(int, tcp_abc_l_var) = 2;
173SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
174 &VNET_NAME(tcp_abc_l_var), 2,
175 "Cap the max cwnd increment during slow-start to this number of segments");
176
177static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
178
179VNET_DEFINE(int, tcp_do_ecn) = 0;
180SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
181 &VNET_NAME(tcp_do_ecn), 0,
182 "TCP ECN support");
183
184VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
185SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
186 &VNET_NAME(tcp_ecn_maxretries), 0,
187 "Max retries before giving up on ECN");
188
189VNET_DEFINE(int, tcp_insecure_rst) = 0;
190#define V_tcp_insecure_rst VNET(tcp_insecure_rst)
191SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
192 &VNET_NAME(tcp_insecure_rst), 0,
193 "Follow the old (insecure) criteria for accepting RST packets");
194
195VNET_DEFINE(int, tcp_recvspace) = 1024*64;
196#define V_tcp_recvspace VNET(tcp_recvspace)
197SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
198 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");
199
200VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
201#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
202SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
203 &VNET_NAME(tcp_do_autorcvbuf), 0,
204 "Enable automatic receive buffer sizing");
205
206VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
207#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
208SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
209 &VNET_NAME(tcp_autorcvbuf_inc), 0,
210 "Incrementor step size of automatic receive buffer");
211
212VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
213#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
214SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
215 &VNET_NAME(tcp_autorcvbuf_max), 0,
216 "Max size of automatic receive buffer");
217
218VNET_DEFINE(struct inpcbhead, tcb);
219#define tcb6 tcb /* for KAME src sync over BSD*'s */
220VNET_DEFINE(struct inpcbinfo, tcbinfo);
221
222static void tcp_dooptions(struct tcpopt *, u_char *, int, int);
223static void tcp_do_segment(struct mbuf *, struct tcphdr *,
224 struct socket *, struct tcpcb *, int, int, uint8_t,
225 int);
226static void tcp_dropwithreset(struct mbuf *, struct tcphdr *,
227 struct tcpcb *, int, int);
228static void tcp_pulloutofband(struct socket *,
229 struct tcphdr *, struct mbuf *, int);
230static void tcp_xmit_timer(struct tcpcb *, int);
231static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
232static void inline tcp_fields_to_host(struct tcphdr *);
233#ifdef TCP_SIGNATURE
234static void inline tcp_fields_to_net(struct tcphdr *);
235static int inline tcp_signature_verify_input(struct mbuf *, int, int,
236 int, struct tcpopt *, struct tcphdr *, u_int);
237#endif
238static void inline cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
239 uint16_t type);
240static void inline cc_conn_init(struct tcpcb *tp);
241static void inline cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
242static void inline hhook_run_tcp_est_in(struct tcpcb *tp,
243 struct tcphdr *th, struct tcpopt *to);
244
245/*
246 * TCP statistics are stored in an "array" of counter(9)s.
247 */
248VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
249VNET_PCPUSTAT_SYSINIT(tcpstat);
250SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
251 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
252
253#ifdef VIMAGE
254VNET_PCPUSTAT_SYSUNINIT(tcpstat);
255#endif /* VIMAGE */
256/*
257 * Kernel module interface for updating tcpstat. The argument is an index
258 * into tcpstat treated as an array.
259 */
260void
261kmod_tcpstat_inc(int statnum)
262{
263
264 counter_u64_add(VNET(tcpstat)[statnum], 1);
265}
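/*
 * Illustrative use (a sketch, not the only possible caller): a kernel
 * module that cannot reference the per-VNET tcpstat symbol directly can
 * bump a field through its index via a wrapper macro along the lines of
 *
 *	#define KMOD_TCPSTAT_INC(name) \
 *	    kmod_tcpstat_inc(offsetof(struct tcpstat, name) / sizeof(uint64_t))
 *
 * so that KMOD_TCPSTAT_INC(tcps_rcvtotal) increments the same counter(9)
 * that TCPSTAT_INC(tcps_rcvtotal) does in the base kernel.
 */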
266
267/*
268 * Wrapper for the TCP established input helper hook.
269 */
270static void inline
271hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
272{
273 struct tcp_hhook_data hhook_data;
274
275 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
276 hhook_data.tp = tp;
277 hhook_data.th = th;
278 hhook_data.to = to;
279
280 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
281 tp->osd);
282 }
283}
284
285/*
286 * CC wrapper hook functions
287 */
288static void inline
289cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
290{
291 INP_WLOCK_ASSERT(tp->t_inpcb);
292
293 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
294 if (tp->snd_cwnd <= tp->snd_wnd)
295 tp->ccv->flags |= CCF_CWND_LIMITED;
296 else
297 tp->ccv->flags &= ~CCF_CWND_LIMITED;
298
299 if (type == CC_ACK) {
300 if (tp->snd_cwnd > tp->snd_ssthresh) {
301 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
302 V_tcp_abc_l_var * tp->t_maxseg);
303 if (tp->t_bytes_acked >= tp->snd_cwnd) {
304 tp->t_bytes_acked -= tp->snd_cwnd;
305 tp->ccv->flags |= CCF_ABC_SENTAWND;
306 }
307 } else {
308 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
309 tp->t_bytes_acked = 0;
310 }
311 }
312
313 if (CC_ALGO(tp)->ack_received != NULL) {
314 /* XXXLAS: Find a way to live without this */
315 tp->ccv->curack = th->th_ack;
316 CC_ALGO(tp)->ack_received(tp->ccv, type);
317 }
318}
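/*
 * Worked example of the RFC 3465 accounting above (illustrative numbers):
 * while snd_cwnd <= snd_ssthresh (slow start) t_bytes_acked is simply
 * reset, but in congestion avoidance each CC_ACK credits at most
 * V_tcp_abc_l_var * t_maxseg bytes -- with t_maxseg = 1460 and the
 * default abc_l_var of 2, at most 2920 bytes per ACK.  Once the credit
 * reaches a full snd_cwnd, CCF_ABC_SENTAWND tells the CC algorithm that
 * an entire window's worth of data has been acknowledged.
 */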
319
320static void inline
321cc_conn_init(struct tcpcb *tp)
322{
323 struct hc_metrics_lite metrics;
324 struct inpcb *inp = tp->t_inpcb;
325 int rtt;
326
327 INP_WLOCK_ASSERT(tp->t_inpcb);
328
329 tcp_hc_get(&inp->inp_inc, &metrics);
330
331 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
332 tp->t_srtt = rtt;
333 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
334 TCPSTAT_INC(tcps_usedrtt);
335 if (metrics.rmx_rttvar) {
336 tp->t_rttvar = metrics.rmx_rttvar;
337 TCPSTAT_INC(tcps_usedrttvar);
338 } else {
339 /* default variation is +- 1 rtt */
340 tp->t_rttvar =
341 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
342 }
343 TCPT_RANGESET(tp->t_rxtcur,
344 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
345 tp->t_rttmin, TCPTV_REXMTMAX);
346 }
347 if (metrics.rmx_ssthresh) {
348 /*
349 * There's some sort of gateway or interface
350 * buffer limit on the path. Use this to set
351 * the slow start threshold, but set the
352 * threshold to no less than 2*mss.
353 */
354 tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
355 TCPSTAT_INC(tcps_usedssthresh);
356 }
357
358 /*
359 * Set the initial slow-start flight size.
360 *
361 * RFC5681 Section 3.1 specifies the default conservative values.
362 * RFC3390 specifies slightly more aggressive values.
363 * Draft-ietf-tcpm-initcwnd-05 increases it to ten segments.
364 *
365 * If a SYN or SYN/ACK was lost and retransmitted, we have to
366 * reduce the initial CWND to one segment, as congestion is
367 * likely, requiring us to be cautious.
368 */
369 if (tp->snd_cwnd == 1)
370 tp->snd_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
371 else if (V_tcp_do_initcwnd10)
372 tp->snd_cwnd = min(10 * tp->t_maxseg,
373 max(2 * tp->t_maxseg, 14600));
374 else if (V_tcp_do_rfc3390)
375 tp->snd_cwnd = min(4 * tp->t_maxseg,
376 max(2 * tp->t_maxseg, 4380));
377 else {
378 /* Per RFC5681 Section 3.1 */
379 if (tp->t_maxseg > 2190)
380 tp->snd_cwnd = 2 * tp->t_maxseg;
381 else if (tp->t_maxseg > 1095)
382 tp->snd_cwnd = 3 * tp->t_maxseg;
383 else
384 tp->snd_cwnd = 4 * tp->t_maxseg;
385 }
386
387 if (CC_ALGO(tp)->conn_init != NULL)
388 CC_ALGO(tp)->conn_init(tp->ccv);
389}
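/*
 * Illustrative initial-window values for the rules above, assuming
 * t_maxseg = 1460 (typical Ethernet):
 *	initcwnd10:	min(10*1460, max(2*1460, 14600)) = 14600 (10 segments)
 *	RFC 3390:	min(4*1460, max(2*1460, 4380))   =  4380 (3 segments)
 *	RFC 5681:	1095 < 1460 <= 2190, so 3*1460   =  4380 (3 segments)
 */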
390
391void inline
392cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
393{
394 INP_WLOCK_ASSERT(tp->t_inpcb);
395
396 switch(type) {
397 case CC_NDUPACK:
398 if (!IN_FASTRECOVERY(tp->t_flags)) {
399 tp->snd_recover = tp->snd_max;
400 if (tp->t_flags & TF_ECN_PERMIT)
401 tp->t_flags |= TF_ECN_SND_CWR;
402 }
403 break;
404 case CC_ECN:
405 if (!IN_CONGRECOVERY(tp->t_flags)) {
406 TCPSTAT_INC(tcps_ecn_rcwnd);
407 tp->snd_recover = tp->snd_max;
408 if (tp->t_flags & TF_ECN_PERMIT)
409 tp->t_flags |= TF_ECN_SND_CWR;
410 }
411 break;
412 case CC_RTO:
413 tp->t_dupacks = 0;
414 tp->t_bytes_acked = 0;
415 EXIT_RECOVERY(tp->t_flags);
416 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
417 tp->t_maxseg) * tp->t_maxseg;
418 tp->snd_cwnd = tp->t_maxseg;
419 break;
420 case CC_RTO_ERR:
421 TCPSTAT_INC(tcps_sndrexmitbad);
422 /* RTO was unnecessary, so reset everything. */
423 tp->snd_cwnd = tp->snd_cwnd_prev;
424 tp->snd_ssthresh = tp->snd_ssthresh_prev;
425 tp->snd_recover = tp->snd_recover_prev;
426 if (tp->t_flags & TF_WASFRECOVERY)
427 ENTER_FASTRECOVERY(tp->t_flags);
428 if (tp->t_flags & TF_WASCRECOVERY)
429 ENTER_CONGRECOVERY(tp->t_flags);
430 tp->snd_nxt = tp->snd_max;
431 tp->t_flags &= ~TF_PREVVALID;
432 tp->t_badrxtwin = 0;
433 break;
434 }
435
436 if (CC_ALGO(tp)->cong_signal != NULL) {
437 if (th != NULL)
438 tp->ccv->curack = th->th_ack;
439 CC_ALGO(tp)->cong_signal(tp->ccv, type);
440 }
441}
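/*
 * Note on the CC_RTO case above: snd_ssthresh becomes half of the data
 * currently in flight (min(snd_wnd, snd_cwnd) / 2), rounded down to a
 * whole number of segments but never below 2 segments, matching
 * equation (4) of RFC 5681; snd_cwnd itself collapses to one segment.
 */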
442
443static void inline
444cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
445{
446 INP_WLOCK_ASSERT(tp->t_inpcb);
447
448 /* XXXLAS: KASSERT that we're in recovery? */
449
450 if (CC_ALGO(tp)->post_recovery != NULL) {
451 tp->ccv->curack = th->th_ack;
452 CC_ALGO(tp)->post_recovery(tp->ccv);
453 }
454 /* XXXLAS: EXIT_RECOVERY ? */
455 tp->t_bytes_acked = 0;
456}
457
458static inline void
459tcp_fields_to_host(struct tcphdr *th)
460{
461
462 th->th_seq = ntohl(th->th_seq);
463 th->th_ack = ntohl(th->th_ack);
464 th->th_win = ntohs(th->th_win);
465 th->th_urp = ntohs(th->th_urp);
466}
467
468#ifdef TCP_SIGNATURE
469static inline void
470tcp_fields_to_net(struct tcphdr *th)
471{
472
473 th->th_seq = htonl(th->th_seq);
474 th->th_ack = htonl(th->th_ack);
475 th->th_win = htons(th->th_win);
476 th->th_urp = htons(th->th_urp);
477}
478
479static inline int
480tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
481 struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
482{
483 int ret;
484
485 tcp_fields_to_net(th);
486 ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
487 tcp_fields_to_host(th);
488 return (ret);
489}
490#endif
491
492/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
493#ifdef INET6
494#define ND6_HINT(tp) \
495do { \
496 if ((tp) && (tp)->t_inpcb && \
497 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
498 nd6_nud_hint(NULL, NULL, 0); \
499} while (0)
500#else
501#define ND6_HINT(tp)
502#endif
503
504/*
505 * Indicate whether this ack should be delayed. We can delay the ack if
506 * - there is no delayed ack timer in progress and
507 * - our last ack wasn't a 0-sized window. We never want to delay
508 * the ack that opens up a 0-sized window and
509 * - delayed acks are enabled or
510 * - this is a half-synchronized T/TCP connection.
511 */
512#define DELAY_ACK(tp) \
513 ((!tcp_timer_active(tp, TT_DELACK) && \
514 (tp->t_flags & TF_RXWIN0SENT) == 0) && \
515 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
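/*
 * Example: a pure in-order data segment on an established connection,
 * with delayed ACKs enabled, no TT_DELACK timer already pending and a
 * non-zero window previously advertised, satisfies DELAY_ACK() and the
 * ACK is deferred to the delayed-ACK timer instead of being sent at once.
 */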
516
517/*
518 * TCP input handling is split into multiple parts:
519 * tcp6_input is a thin wrapper around tcp_input for the extended
520 * ip6_protox[] call format in ip6_input
521 * tcp_input handles primary segment validation, inpcb lookup and
522 * SYN processing on listen sockets
523 * tcp_do_segment processes the ACK and text of the segment for
524 * establishing, established and closing connections
525 */
526#ifdef INET6
527int
528tcp6_input(struct mbuf **mp, int *offp, int proto)
529{
530 struct mbuf *m = *mp;
531 struct in6_ifaddr *ia6;
532
533 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
534
535 /*
536 * draft-itojun-ipv6-tcp-to-anycast
537 * Is there a better place to put this?
538 */
539 ia6 = ip6_getdstifaddr(m);
540 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
541 struct ip6_hdr *ip6;
542
543 ifa_free(&ia6->ia_ifa);
544 ip6 = mtod(m, struct ip6_hdr *);
545 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
546 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
547 return IPPROTO_DONE;
548 }
549 if (ia6)
550 ifa_free(&ia6->ia_ifa);
551
552 tcp_input(m, *offp);
553 return IPPROTO_DONE;
554}
555#endif /* INET6 */
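/*
 * Summary of the IPv6 wrapper above: segments whose destination resolved
 * to an anycast address are rejected with an ICMPv6 destination
 * unreachable (per draft-itojun-ipv6-tcp-to-anycast, a TCP endpoint may
 * not be an anycast address); everything else is handed to the common
 * tcp_input() path.
 */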
556
557void
558tcp_input(struct mbuf *m, int off0)
559{
560 struct tcphdr *th = NULL;
561 struct ip *ip = NULL;
562 struct inpcb *inp = NULL;
563 struct tcpcb *tp = NULL;
564 struct socket *so = NULL;
565 u_char *optp = NULL;
566 int optlen = 0;
567#ifdef INET
568 int len;
569#endif
570 int tlen = 0, off;
571 int drop_hdrlen;
572 int thflags;
573 int rstreason = 0; /* For badport_bandlim accounting purposes */
574#ifdef TCP_SIGNATURE
575 uint8_t sig_checked = 0;
576#endif
577 uint8_t iptos = 0;
578 struct m_tag *fwd_tag = NULL;
579#ifdef INET6
580 struct ip6_hdr *ip6 = NULL;
581 int isipv6;
582#else
583 const void *ip6 = NULL;
584#endif /* INET6 */
585 struct tcpopt to; /* options in this segment */
586 char *s = NULL; /* address and port logging */
587 int ti_locked;
588#define TI_UNLOCKED 1
589#define TI_WLOCKED 2
590
591#ifdef TCPDEBUG
592 /*
593 * The size of tcp_saveipgen must be the size of the max ip header,
594 * now IPv6.
595 */
596 u_char tcp_saveipgen[IP6_HDR_LEN];
597 struct tcphdr tcp_savetcp;
598 short ostate = 0;
599#endif
600
601#ifdef INET6
602 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
603#endif
604
605 to.to_flags = 0;
606 TCPSTAT_INC(tcps_rcvtotal);
607
608#ifdef INET6
609 if (isipv6) {
610 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
611
612 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
613 m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
614 if (m == NULL) {
615 TCPSTAT_INC(tcps_rcvshort);
616 return;
617 }
618 }
619
620 ip6 = mtod(m, struct ip6_hdr *);
621 th = (struct tcphdr *)((caddr_t)ip6 + off0);
622 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
623 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
624 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
625 th->th_sum = m->m_pkthdr.csum_data;
626 else
627 th->th_sum = in6_cksum_pseudo(ip6, tlen,
628 IPPROTO_TCP, m->m_pkthdr.csum_data);
629 th->th_sum ^= 0xffff;
630 } else
631 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
632 if (th->th_sum) {
633 TCPSTAT_INC(tcps_rcvbadsum);
634 goto drop;
635 }
636
637 /*
638 * Be proactive about an unspecified IPv6 source address. As we
639 * use the all-zeros address to indicate an unbound/unconnected
640 * pcb, an unspecified IPv6 address can be used to confuse us.
641 *
642 * Note that packets with an unspecified IPv6 destination are
643 * already dropped in ip6_input.
644 */
645 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
646 /* XXX stat */
647 goto drop;
648 }
649 }
650#endif
651#if defined(INET) && defined(INET6)
652 else
653#endif
654#ifdef INET
655 {
656 /*
657 * Get IP and TCP header together in first mbuf.
658 * Note: IP leaves IP header in first mbuf.
659 */
660 if (off0 > sizeof (struct ip)) {
661 ip_stripoptions(m);
662 off0 = sizeof(struct ip);
663 }
664 if (m->m_len < sizeof (struct tcpiphdr)) {
665 if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
666 == NULL) {
667 TCPSTAT_INC(tcps_rcvshort);
668 return;
669 }
670 }
671 ip = mtod(m, struct ip *);
672 th = (struct tcphdr *)((caddr_t)ip + off0);
673 tlen = ntohs(ip->ip_len) - off0;
674
675 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
676 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
677 th->th_sum = m->m_pkthdr.csum_data;
678 else
679 th->th_sum = in_pseudo(ip->ip_src.s_addr,
680 ip->ip_dst.s_addr,
681 htonl(m->m_pkthdr.csum_data + tlen +
682 IPPROTO_TCP));
683 th->th_sum ^= 0xffff;
684 } else {
685 struct ipovly *ipov = (struct ipovly *)ip;
686
687 /*
688 * Checksum extended TCP header and data.
689 */
690 len = off0 + tlen;
691 bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
692 ipov->ih_len = htons(tlen);
693 th->th_sum = in_cksum(m, len);
694 /* Reset length for SDT probes. */
695 ip->ip_len = htons(tlen + off0);
696 }
697
692 if (th->th_sum) {
693 TCPSTAT_INC(tcps_rcvbadsum);
694 goto drop;
695 }
696 /* Re-initialization for later version check */
697 ip->ip_v = IPVERSION;
698 }
699#endif /* INET */
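/*
 * Checksum note for the INET block above: when the driver sets
 * CSUM_DATA_VALID it has already summed the payload, so at most the
 * pseudo-header must be added; the final "th_sum ^= 0xffff" folds the
 * ones'-complement result so that a valid segment yields th_sum == 0,
 * which is exactly what the "if (th->th_sum)" test relies on.
 */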
700
701#ifdef INET6
702 if (isipv6)
703 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
704#endif
705#if defined(INET) && defined(INET6)
706 else
707#endif
708#ifdef INET
709 iptos = ip->ip_tos;
710#endif
711
712 /*
713 * Check that TCP offset makes sense,
714 * pull out TCP options and adjust length. XXX
715 */
716 off = th->th_off << 2;
717 if (off < sizeof (struct tcphdr) || off > tlen) {
718 TCPSTAT_INC(tcps_rcvbadoff);
719 goto drop;
720 }
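/*
 * th_off counts 32-bit words, hence the "<< 2" above: the legal range
 * is 5 words (a bare 20-byte header) through 15 words (60 bytes, i.e.
 * up to 40 bytes of options); anything shorter than the header or
 * longer than the segment itself is dropped as tcps_rcvbadoff.
 */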
721 tlen -= off; /* tlen is used instead of ti->ti_len */
722 if (off > sizeof (struct tcphdr)) {
723#ifdef INET6
724 if (isipv6) {
725 IP6_EXTHDR_CHECK(m, off0, off, );
726 ip6 = mtod(m, struct ip6_hdr *);
727 th = (struct tcphdr *)((caddr_t)ip6 + off0);
728 }
729#endif
730#if defined(INET) && defined(INET6)
731 else
732#endif
733#ifdef INET
734 {
735 if (m->m_len < sizeof(struct ip) + off) {
736 if ((m = m_pullup(m, sizeof (struct ip) + off))
737 == NULL) {
738 TCPSTAT_INC(tcps_rcvshort);
739 return;
740 }
741 ip = mtod(m, struct ip *);
742 th = (struct tcphdr *)((caddr_t)ip + off0);
743 }
744 }
745#endif
746 optlen = off - sizeof (struct tcphdr);
747 optp = (u_char *)(th + 1);
748 }
749 thflags = th->th_flags;
750
751 /*
752 * Convert TCP protocol specific fields to host format.
753 */
754 tcp_fields_to_host(th);
755
756 /*
757 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
758 */
759 drop_hdrlen = off0 + off;
760
761 /*
762 * Locate pcb for segment; if we're likely to add or remove a
763 * connection then first acquire pcbinfo lock. There are two cases
764 * where we might discover later we need a write lock despite the
765 * flags: ACKs moving a connection out of the syncache, and ACKs for
766 * a connection in TIMEWAIT.
767 */
768 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) {
769 INP_INFO_WLOCK(&V_tcbinfo);
770 ti_locked = TI_WLOCKED;
771 } else
772 ti_locked = TI_UNLOCKED;
773
774 /*
775 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
776 */
777 if (
778#ifdef INET6
779 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
780#ifdef INET
781 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
782#endif
783#endif
784#if defined(INET) && !defined(INET6)
785 (m->m_flags & M_IP_NEXTHOP)
786#endif
787 )
788 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
789
790findpcb:
791#ifdef INVARIANTS
792 if (ti_locked == TI_WLOCKED) {
793 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
794 } else {
795 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
796 }
797#endif
798#ifdef INET6
799 if (isipv6 && fwd_tag != NULL) {
800 struct sockaddr_in6 *next_hop6;
801
802 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
803 /*
804 * Transparently forwarded. Pretend to be the destination.
805 * Already got one like this?
806 */
807 inp = in6_pcblookup_mbuf(&V_tcbinfo,
808 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
809 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
810 if (!inp) {
811 /*
812 * It's new. Try to find the ambushing socket.
813 * Because we've rewritten the destination address,
814 * any hardware-generated hash is ignored.
815 */
816 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
817 th->th_sport, &next_hop6->sin6_addr,
818 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
819 th->th_dport, INPLOOKUP_WILDCARD |
820 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
821 }
822 } else if (isipv6) {
823 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
824 th->th_sport, &ip6->ip6_dst, th->th_dport,
825 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
826 m->m_pkthdr.rcvif, m);
827 }
828#endif /* INET6 */
829#if defined(INET6) && defined(INET)
830 else
831#endif
832#ifdef INET
833 if (fwd_tag != NULL) {
834 struct sockaddr_in *next_hop;
835
836 next_hop = (struct sockaddr_in *)(fwd_tag+1);
837 /*
838 * Transparently forwarded. Pretend to be the destination.
839 * Already got one like this?
840 */
841 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
842 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
843 m->m_pkthdr.rcvif, m);
844 if (!inp) {
845 /*
846 * It's new. Try to find the ambushing socket.
847 * Because we've rewritten the destination address,
848 * any hardware-generated hash is ignored.
849 */
850 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
851 th->th_sport, next_hop->sin_addr,
852 next_hop->sin_port ? ntohs(next_hop->sin_port) :
853 th->th_dport, INPLOOKUP_WILDCARD |
854 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
855 }
856 } else
857 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
858 th->th_sport, ip->ip_dst, th->th_dport,
859 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
860 m->m_pkthdr.rcvif, m);
861#endif /* INET */
862
863 /*
864 * If the INPCB does not exist then all data in the incoming
865 * segment is discarded and an appropriate RST is sent back.
866 * XXX MRT Send RST using which routing table?
867 */
868 if (inp == NULL) {
869 /*
870 * Log communication attempts to ports that are not
871 * in use.
872 */
873 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
874 tcp_log_in_vain == 2) {
875 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
876 log(LOG_INFO, "%s; %s: Connection attempt "
877 "to closed port\n", s, __func__);
878 }
879 /*
880 * When blackholing, do not respond with a RST;
881 * completely ignore the segment and drop it.
882 */
883 if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
884 V_blackhole == 2)
885 goto dropunlock;
886
887 rstreason = BANDLIM_RST_CLOSEDPORT;
888 goto dropwithreset;
889 }
890 INP_WLOCK_ASSERT(inp);
891 if (!(inp->inp_flags & INP_HW_FLOWID)
892 && (m->m_flags & M_FLOWID)
893 && ((inp->inp_socket == NULL)
894 || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
895 inp->inp_flags |= INP_HW_FLOWID;
896 inp->inp_flags &= ~INP_SW_FLOWID;
897 inp->inp_flowid = m->m_pkthdr.flowid;
898 }
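/*
 * The block above adopts the NIC-supplied flow ID (M_FLOWID) for this
 * connection, but deliberately not for listening sockets: a listener
 * handles many flows, so pinning one segment's hash onto it would be
 * wrong.
 */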
899#ifdef IPSEC
900#ifdef INET6
901 if (isipv6 && ipsec6_in_reject(m, inp)) {
902 IPSEC6STAT_INC(ips_in_polvio);
903 goto dropunlock;
904 } else
905#endif /* INET6 */
906 if (ipsec4_in_reject(m, inp) != 0) {
907 IPSECSTAT_INC(ips_in_polvio);
908 goto dropunlock;
909 }
910#endif /* IPSEC */
911
912 /*
913 * Check the minimum TTL for socket.
914 */
915 if (inp->inp_ip_minttl != 0) {
916#ifdef INET6
917 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
918 goto dropunlock;
919 else
920#endif
921 if (inp->inp_ip_minttl > ip->ip_ttl)
922 goto dropunlock;
923 }
924
925 /*
926 * A previous connection in TIMEWAIT state is supposed to catch stray
927 * or duplicate segments arriving late. If this segment was a
928 * legitimate new connection attempt, the old INPCB gets removed and
929 * we can try again to find a listening socket.
930 *
931 * At this point, due to earlier optimism, we may hold only an inpcb
932 * lock, and not the inpcbinfo write lock. If so, we need to try to
933 * acquire it, or if that fails, acquire a reference on the inpcb,
934 * drop all locks, acquire a global write lock, and then re-acquire
935 * the inpcb lock. We may at that point discover that another thread
936 * has tried to free the inpcb, in which case we need to loop back
937 * and try to find a new inpcb to deliver to.
938 *
939 * XXXRW: It may be time to rethink timewait locking.
940 */
941relocked:
942 if (inp->inp_flags & INP_TIMEWAIT) {
943 if (ti_locked == TI_UNLOCKED) {
944 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
945 in_pcbref(inp);
946 INP_WUNLOCK(inp);
947 INP_INFO_WLOCK(&V_tcbinfo);
948 ti_locked = TI_WLOCKED;
949 INP_WLOCK(inp);
950 if (in_pcbrele_wlocked(inp)) {
951 inp = NULL;
952 goto findpcb;
953 }
954 } else
955 ti_locked = TI_WLOCKED;
956 }
957 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
958
959 if (thflags & TH_SYN)
960 tcp_dooptions(&to, optp, optlen, TO_SYN);
961 /*
962 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
963 */
964 if (tcp_twcheck(inp, &to, th, m, tlen))
965 goto findpcb;
966 INP_INFO_WUNLOCK(&V_tcbinfo);
967 return;
968 }
969 /*
970 * The TCPCB may no longer exist if the connection is winding
971 * down or it is in the CLOSED state. Either way we drop the
972 * segment and send an appropriate response.
973 */
974 tp = intotcpcb(inp);
975 if (tp == NULL || tp->t_state == TCPS_CLOSED) {
976 rstreason = BANDLIM_RST_CLOSEDPORT;
977 goto dropwithreset;
978 }
979
980#ifdef TCP_OFFLOAD
981 if (tp->t_flags & TF_TOE) {
982 tcp_offload_input(tp, m);
983 m = NULL; /* consumed by the TOE driver */
984 goto dropunlock;
985 }
986#endif
987
988 /*
989 * We've identified a valid inpcb, but it could be that we need an
990 * inpcbinfo write lock but don't hold it. In this case, attempt to
991 * acquire using the same strategy as the TIMEWAIT case above. If we
992 * relock, we have to jump back to 'relocked' as the connection might
993 * now be in TIMEWAIT.
994 */
995#ifdef INVARIANTS
996 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0)
997 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
998#endif
999 if (tp->t_state != TCPS_ESTABLISHED) {
1000 if (ti_locked == TI_UNLOCKED) {
1001 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
1002 in_pcbref(inp);
1003 INP_WUNLOCK(inp);
1004 INP_INFO_WLOCK(&V_tcbinfo);
1005 ti_locked = TI_WLOCKED;
1006 INP_WLOCK(inp);
1007 if (in_pcbrele_wlocked(inp)) {
1008 inp = NULL;
1009 goto findpcb;
1010 }
1011 goto relocked;
1012 } else
1013 ti_locked = TI_WLOCKED;
1014 }
1015 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1016 }
1017
1018#ifdef MAC
1019 INP_WLOCK_ASSERT(inp);
1020 if (mac_inpcb_check_deliver(inp, m))
1021 goto dropunlock;
1022#endif
1023 so = inp->inp_socket;
1024 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1025#ifdef TCPDEBUG
1026 if (so->so_options & SO_DEBUG) {
1027 ostate = tp->t_state;
1028#ifdef INET6
1029 if (isipv6) {
1030 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
1031 } else
1032#endif
1033 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1034 tcp_savetcp = *th;
1035 }
1036#endif /* TCPDEBUG */
1037 /*
1038 * When the socket is accepting connections (the INPCB is in LISTEN
1039 * state) we look into the SYN cache if this is a new connection
1040 * attempt or the completion of a previous one. Because listen
1041 * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be
1042 * held in this case.
1043 */
1044 if (so->so_options & SO_ACCEPTCONN) {
1045 struct in_conninfo inc;
1046
1047 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
1048 "tp not listening", __func__));
1049 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1050
1051 bzero(&inc, sizeof(inc));
1052#ifdef INET6
1053 if (isipv6) {
1054 inc.inc_flags |= INC_ISIPV6;
1055 inc.inc6_faddr = ip6->ip6_src;
1056 inc.inc6_laddr = ip6->ip6_dst;
1057 } else
1058#endif
1059 {
1060 inc.inc_faddr = ip->ip_src;
1061 inc.inc_laddr = ip->ip_dst;
1062 }
1063 inc.inc_fport = th->th_sport;
1064 inc.inc_lport = th->th_dport;
1065 inc.inc_fibnum = so->so_fibnum;
1066
1067 /*
1068 * Check for an existing connection attempt in syncache if
1069 * the only flag set is ACK. A successful lookup creates a new
1070 * socket appended to the listen queue in SYN_RECEIVED state.
1071 */
1072 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1073 /*
1074 * Parse the TCP options here because
1075 * syncookies need access to the reflected
1076 * timestamp.
1077 */
1078 tcp_dooptions(&to, optp, optlen, 0);
1079 /*
1080 * NB: syncache_expand() doesn't unlock
1081 * inp and tcpinfo locks.
1082 */
1083 if (!syncache_expand(&inc, &to, th, &so, m)) {
1084 /*
1085 * No syncache entry or ACK was not
1086 * for our SYN/ACK. Send a RST.
1087 * NB: syncache did its own logging
1088 * of the failure cause.
1089 */
1090 rstreason = BANDLIM_RST_OPENPORT;
1091 goto dropwithreset;
1092 }
1093 if (so == NULL) {
1094 /*
1095 * We completed the 3-way handshake
1096 * but could not allocate a socket
1097 * either due to memory shortage,
1098 * listen queue length limits or
1099 * global socket limits. Send RST
1100 * or wait and have the remote end
1101 * retransmit the ACK for another
1102 * try.
1103 */
1104 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1105 log(LOG_DEBUG, "%s; %s: Listen socket: "
1106 "Socket allocation failed due to "
1107 "limits or memory shortage, %s\n",
1108 s, __func__,
1109 V_tcp_sc_rst_sock_fail ?
1110 "sending RST" : "try again");
1111 if (V_tcp_sc_rst_sock_fail) {
1112 rstreason = BANDLIM_UNLIMITED;
1113 goto dropwithreset;
1114 } else
1115 goto dropunlock;
1116 }
1117 /*
1118 * Socket is created in state SYN_RECEIVED.
1119 * Unlock the listen socket, lock the newly
1120 * created socket and update the tp variable.
1121 */
1122 INP_WUNLOCK(inp); /* listen socket */
1123 inp = sotoinpcb(so);
1124 INP_WLOCK(inp); /* new connection */
1125 tp = intotcpcb(inp);
1126 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1127 ("%s: ", __func__));
1128#ifdef TCP_SIGNATURE
1129 if (sig_checked == 0) {
1130 tcp_dooptions(&to, optp, optlen,
1131 (thflags & TH_SYN) ? TO_SYN : 0);
1132 if (!tcp_signature_verify_input(m, off0, tlen,
1133 optlen, &to, th, tp->t_flags)) {
1134
1135 /*
1136 * In SYN_SENT state if it receives an
1137 * RST, it is allowed for further
1138 * processing.
1139 */
1140 if ((thflags & TH_RST) == 0 ||
1141 (tp->t_state == TCPS_SYN_SENT) == 0)
1142 goto dropunlock;
1143 }
1144 sig_checked = 1;
1145 }
1146#endif
1147
1148 /*
1149 * Process the segment and the data it
1150 * contains. tcp_do_segment() consumes
1151 * the mbuf chain and unlocks the inpcb.
1152 */
1153 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1154 iptos, ti_locked);
1155 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1156 return;
1157 }
1158 /*
1159 * Segment flag validation for new connection attempts:
1160 *
1161 * Our (SYN|ACK) response was rejected.
1162 * Check with syncache and remove entry to prevent
1163 * retransmits.
1164 *
1165 * NB: syncache_chkrst does its own logging of failure
1166 * causes.
1167 */
1168 if (thflags & TH_RST) {
1169 syncache_chkrst(&inc, th);
1170 goto dropunlock;
1171 }
1172 /*
1173 * We can't do anything without SYN.
1174 */
1175 if ((thflags & TH_SYN) == 0) {
1176 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1177 log(LOG_DEBUG, "%s; %s: Listen socket: "
1178 "SYN is missing, segment ignored\n",
1179 s, __func__);
1180 TCPSTAT_INC(tcps_badsyn);
1181 goto dropunlock;
1182 }
1183 /*
1184 * (SYN|ACK) is bogus on a listen socket.
1185 */
1186 if (thflags & TH_ACK) {
1187 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1188 log(LOG_DEBUG, "%s; %s: Listen socket: "
1189 "SYN|ACK invalid, segment rejected\n",
1190 s, __func__);
1191 syncache_badack(&inc); /* XXX: Not needed! */
1192 TCPSTAT_INC(tcps_badsyn);
1193 rstreason = BANDLIM_RST_OPENPORT;
1194 goto dropwithreset;
1195 }
1196 /*
1197 * If the drop_synfin option is enabled, drop all
1198 * segments with both the SYN and FIN bits set.
1199 * This prevents e.g. nmap from identifying the
1200 * TCP/IP stack.
1201 * XXX: Poor reasoning. nmap has other methods
1202 * and is constantly refining its stack detection
1203 * strategies.
1204 * XXX: This is a violation of the TCP specification
1205 * and was used by RFC1644.
1206 */
1207 if ((thflags & TH_FIN) && V_drop_synfin) {
1208 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1209 log(LOG_DEBUG, "%s; %s: Listen socket: "
1210 "SYN|FIN segment ignored (based on "
1211 "sysctl setting)\n", s, __func__);
1212 TCPSTAT_INC(tcps_badsyn);
1213 goto dropunlock;
1214 }
1215 /*
1216 * Segment's flags are (SYN) or (SYN|FIN).
1217 *
1218 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1219 * as they do not affect the state of the TCP FSM.
1220 * The data pointed to by TH_URG and th_urp is ignored.
1221 */
1222 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1223 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1224 KASSERT(thflags & (TH_SYN),
1225 ("%s: Listen socket: TH_SYN not set", __func__));
1226#ifdef INET6
1227 /*
1228 * If deprecated address is forbidden,
1229 * we do not accept SYN to deprecated interface
1230 * address to prevent any new inbound connection from
1231 * getting established.
1232 * When we do not accept SYN, we send a TCP RST,
1233 * with deprecated source address (instead of dropping
1234 * it). We compromise it as it is much better for peer
1235 * to send a RST, and RST will be the final packet
1236 * for the exchange.
1237 *
1238 * If we do not forbid deprecated addresses, we accept
1239 * the SYN packet. RFC2462 does not suggest dropping
1240 * SYN in this case.
1241 * If we decipher RFC2462 5.5.4, it says like this:
1242 * 1. use of deprecated addr with existing
1243 * communication is okay - "SHOULD continue to be
1244 * used"
1245 * 2. use of it with new communication:
1246 * (2a) "SHOULD NOT be used if alternate address
1247 * with sufficient scope is available"
1248 * (2b) nothing mentioned otherwise.
1249 * Here we fall into (2b) case as we have no choice in
1250 * our source address selection - we must obey the peer.
1251 *
1252 * The wording in RFC2462 is confusing, and there are
1253 * multiple description text for deprecated address
1254 * handling - worse, they are not exactly the same.
1255 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1256 */
1257 if (isipv6 && !V_ip6_use_deprecated) {
1258 struct in6_ifaddr *ia6;
1259
1260 ia6 = ip6_getdstifaddr(m);
1261 if (ia6 != NULL &&
1262 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1263 ifa_free(&ia6->ia_ifa);
1264 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1265 log(LOG_DEBUG, "%s; %s: Listen socket: "
1266 "Connection attempt to deprecated "
1267 "IPv6 address rejected\n",
1268 s, __func__);
1269 rstreason = BANDLIM_RST_OPENPORT;
1270 goto dropwithreset;
1271 }
1272 if (ia6)
1273 ifa_free(&ia6->ia_ifa);
1274 }
1275#endif /* INET6 */
1276 /*
1277 * Basic sanity checks on incoming SYN requests:
1278 * Don't respond if the destination is a link layer
1279 * broadcast according to RFC1122 4.2.3.10, p. 104.
1280 * If it is from this socket it must be forged.
1281 * Don't respond if the source or destination is a
1282 * global or subnet broad- or multicast address.
1283 * Note that it is quite possible to receive unicast
1284 * link-layer packets with a broadcast IP address. Use
1285 * in_broadcast() to find them.
1286 */
1287 if (m->m_flags & (M_BCAST|M_MCAST)) {
1288 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1289 log(LOG_DEBUG, "%s; %s: Listen socket: "
1290 "Connection attempt from broad- or multicast "
1291 "link layer address ignored\n", s, __func__);
1292 goto dropunlock;
1293 }
1294#ifdef INET6
1295 if (isipv6) {
1296 if (th->th_dport == th->th_sport &&
1297 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1298 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1299 log(LOG_DEBUG, "%s; %s: Listen socket: "
1300 "Connection attempt to/from self "
1301 "ignored\n", s, __func__);
1302 goto dropunlock;
1303 }
1304 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1305 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1306 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1307 log(LOG_DEBUG, "%s; %s: Listen socket: "
1308 "Connection attempt from/to multicast "
1309 "address ignored\n", s, __func__);
1310 goto dropunlock;
1311 }
1312 }
1313#endif
1314#if defined(INET) && defined(INET6)
1315 else
1316#endif
1317#ifdef INET
1318 {
1319 if (th->th_dport == th->th_sport &&
1320 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1321 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1322 log(LOG_DEBUG, "%s; %s: Listen socket: "
1323 "Connection attempt from/to self "
1324 "ignored\n", s, __func__);
1325 goto dropunlock;
1326 }
1327 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1328 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1329 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1330 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1331 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1332 log(LOG_DEBUG, "%s; %s: Listen socket: "
1333 "Connection attempt from/to broad- "
1334 "or multicast address ignored\n",
1335 s, __func__);
1336 goto dropunlock;
1337 }
1338 }
1339#endif
1340 /*
1341 * SYN appears to be valid. Create compressed TCP state
1342 * for syncache.
1343 */
1344#ifdef TCPDEBUG
1345 if (so->so_options & SO_DEBUG)
1346 tcp_trace(TA_INPUT, ostate, tp,
1347 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1348#endif
1349 tcp_dooptions(&to, optp, optlen, TO_SYN);
1350 syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
1351 /*
1352 * Entry added to syncache and mbuf consumed.
1353 * Everything already unlocked by syncache_add().
1354 */
1355 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1356 return;
1357 } else if (tp->t_state == TCPS_LISTEN) {
1358 /*
1359 * When a listen socket is torn down the SO_ACCEPTCONN
1360 * flag is removed first while connections are drained
1361 * from the accept queue in a unlock/lock cycle of the
1362 * ACCEPT_LOCK, opening a race condition allowing a SYN
1363 * attempt go through unhandled.
1364 */
1365 goto dropunlock;
1366 }
1367
1368#ifdef TCP_SIGNATURE
1369 if (sig_checked == 0) {
1370 tcp_dooptions(&to, optp, optlen,
1371 (thflags & TH_SYN) ? TO_SYN : 0);
1372 if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
1373 th, tp->t_flags)) {
1374
1375 /*
1376 * In SYN_SENT state if it receives an RST, it is
1377 * allowed for further processing.
1378 */
1379 if ((thflags & TH_RST) == 0 ||
1380 (tp->t_state == TCPS_SYN_SENT) == 0)
1381 goto dropunlock;
1382 }
1383 sig_checked = 1;
1384 }
1385#endif
1386
698 if (th->th_sum) {
699 TCPSTAT_INC(tcps_rcvbadsum);
700 goto drop;
701 }
702 /* Re-initialization for later version check */
703 ip->ip_v = IPVERSION;
704 }
705#endif /* INET */
706
707#ifdef INET6
708 if (isipv6)
709 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
710#endif
711#if defined(INET) && defined(INET6)
712 else
713#endif
714#ifdef INET
715 iptos = ip->ip_tos;
716#endif
717
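	/*
	 * A worked example of the extraction above (an editorial sketch,
	 * not part of the original file): the first 32-bit word of an
	 * IPv6 header is laid out as version(4) | traffic class(8) |
	 * flow label(20), so for a host-order word of 0x6b800000
	 * (version 6, traffic class 0xb8, flow label 0),
	 * (0x6b800000 >> 20) & 0xff evaluates to 0xb8, i.e. DSCP 46
	 * (EF) with a Not-ECT ECN codepoint.
	 */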
718 /*
719 * Check that TCP offset makes sense,
720 * pull out TCP options and adjust length. XXX
721 */
722 off = th->th_off << 2;
723 if (off < sizeof (struct tcphdr) || off > tlen) {
724 TCPSTAT_INC(tcps_rcvbadoff);
725 goto drop;
726 }
727 tlen -= off; /* tlen is used instead of ti->ti_len */
728 if (off > sizeof (struct tcphdr)) {
729#ifdef INET6
730 if (isipv6) {
731 IP6_EXTHDR_CHECK(m, off0, off, );
732 ip6 = mtod(m, struct ip6_hdr *);
733 th = (struct tcphdr *)((caddr_t)ip6 + off0);
734 }
735#endif
736#if defined(INET) && defined(INET6)
737 else
738#endif
739#ifdef INET
740 {
741 if (m->m_len < sizeof(struct ip) + off) {
742 if ((m = m_pullup(m, sizeof (struct ip) + off))
743 == NULL) {
744 TCPSTAT_INC(tcps_rcvshort);
745 return;
746 }
747 ip = mtod(m, struct ip *);
748 th = (struct tcphdr *)((caddr_t)ip + off0);
749 }
750 }
751#endif
752 optlen = off - sizeof (struct tcphdr);
753 optp = (u_char *)(th + 1);
754 }
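	/*
	 * Editorial sketch of the header-length arithmetic above (not
	 * part of the original file): th_off counts 32-bit words, so
	 * e.g. th_off == 8 gives off = 8 << 2 == 32 bytes of TCP
	 * header, of which optlen = 32 - sizeof(struct tcphdr) == 12
	 * bytes are options located at optp = (u_char *)(th + 1),
	 * immediately after the fixed 20-byte header.
	 */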
755 thflags = th->th_flags;
756
757 /*
758 * Convert TCP protocol specific fields to host format.
759 */
760 tcp_fields_to_host(th);
761
762 /*
763 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
764 */
765 drop_hdrlen = off0 + off;
766
767 /*
768 * Locate pcb for segment; if we're likely to add or remove a
769 * connection then first acquire pcbinfo lock. There are two cases
770 * where we might discover later we need a write lock despite the
771 * flags: ACKs moving a connection out of the syncache, and ACKs for
772 * a connection in TIMEWAIT.
773 */
774 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) {
775 INP_INFO_WLOCK(&V_tcbinfo);
776 ti_locked = TI_WLOCKED;
777 } else
778 ti_locked = TI_UNLOCKED;
779
780 /*
781 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
782 */
783 if (
784#ifdef INET6
785 (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
786#ifdef INET
787 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
788#endif
789#endif
790#if defined(INET) && !defined(INET6)
791 (m->m_flags & M_IP_NEXTHOP)
792#endif
793 )
794 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
795
796findpcb:
797#ifdef INVARIANTS
798 if (ti_locked == TI_WLOCKED) {
799 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
800 } else {
801 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
802 }
803#endif
804#ifdef INET6
805 if (isipv6 && fwd_tag != NULL) {
806 struct sockaddr_in6 *next_hop6;
807
808 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
809 /*
810 * Transparently forwarded. Pretend to be the destination.
811 * Already got one like this?
812 */
813 inp = in6_pcblookup_mbuf(&V_tcbinfo,
814 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
815 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
816 if (!inp) {
817 /*
818 * It's new. Try to find the ambushing socket.
819 * Because we've rewritten the destination address,
820 * any hardware-generated hash is ignored.
821 */
822 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
823 th->th_sport, &next_hop6->sin6_addr,
824 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
825 th->th_dport, INPLOOKUP_WILDCARD |
826 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
827 }
828 } else if (isipv6) {
829 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
830 th->th_sport, &ip6->ip6_dst, th->th_dport,
831 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
832 m->m_pkthdr.rcvif, m);
833 }
834#endif /* INET6 */
835#if defined(INET6) && defined(INET)
836 else
837#endif
838#ifdef INET
839 if (fwd_tag != NULL) {
840 struct sockaddr_in *next_hop;
841
842 next_hop = (struct sockaddr_in *)(fwd_tag+1);
843 /*
844 * Transparently forwarded. Pretend to be the destination.
845		 * Already got one like this?
846 */
847 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
848 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
849 m->m_pkthdr.rcvif, m);
850 if (!inp) {
851 /*
852 * It's new. Try to find the ambushing socket.
853 * Because we've rewritten the destination address,
854 * any hardware-generated hash is ignored.
855 */
856 inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
857 th->th_sport, next_hop->sin_addr,
858 next_hop->sin_port ? ntohs(next_hop->sin_port) :
859 th->th_dport, INPLOOKUP_WILDCARD |
860 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
861 }
862 } else
863 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
864 th->th_sport, ip->ip_dst, th->th_dport,
865 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
866 m->m_pkthdr.rcvif, m);
867#endif /* INET */
868
869 /*
870 * If the INPCB does not exist then all data in the incoming
871 * segment is discarded and an appropriate RST is sent back.
872 * XXX MRT Send RST using which routing table?
873 */
874 if (inp == NULL) {
875 /*
876 * Log communication attempts to ports that are not
877 * in use.
878 */
879 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
880 tcp_log_in_vain == 2) {
881 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
882 log(LOG_INFO, "%s; %s: Connection attempt "
883 "to closed port\n", s, __func__);
884 }
885 /*
886 * When blackholing do not respond with a RST but
887 * completely ignore the segment and drop it.
888 */
889 if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
890 V_blackhole == 2)
891 goto dropunlock;
892
893 rstreason = BANDLIM_RST_CLOSEDPORT;
894 goto dropwithreset;
895 }
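	/*
	 * Editorial summary of the policy applied above (the variables
	 * correspond to the net.inet.tcp.log_in_vain and
	 * net.inet.tcp.blackhole sysctls): log_in_vain == 1 logs
	 * unmatched SYNs only and == 2 logs any unmatched segment;
	 * blackhole == 1 silently drops unmatched SYNs, == 2 silently
	 * drops any unmatched segment, and otherwise a rate-limited
	 * RST is sent.
	 */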
896 INP_WLOCK_ASSERT(inp);
897 if (!(inp->inp_flags & INP_HW_FLOWID)
898 && (m->m_flags & M_FLOWID)
899 && ((inp->inp_socket == NULL)
900 || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
901 inp->inp_flags |= INP_HW_FLOWID;
902 inp->inp_flags &= ~INP_SW_FLOWID;
903 inp->inp_flowid = m->m_pkthdr.flowid;
904 }
905#ifdef IPSEC
906#ifdef INET6
907 if (isipv6 && ipsec6_in_reject(m, inp)) {
908 IPSEC6STAT_INC(ips_in_polvio);
909 goto dropunlock;
910 } else
911#endif /* INET6 */
912 if (ipsec4_in_reject(m, inp) != 0) {
913 IPSECSTAT_INC(ips_in_polvio);
914 goto dropunlock;
915 }
916#endif /* IPSEC */
917
918 /*
919 * Check the minimum TTL for socket.
920 */
921 if (inp->inp_ip_minttl != 0) {
922#ifdef INET6
923 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
924 goto dropunlock;
925 else
926#endif
927 if (inp->inp_ip_minttl > ip->ip_ttl)
928 goto dropunlock;
929 }
930
931 /*
932 * A previous connection in TIMEWAIT state is supposed to catch stray
933 * or duplicate segments arriving late. If this segment was a
934 * legitimate new connection attempt, the old INPCB gets removed and
935 * we can try again to find a listening socket.
936 *
937 * At this point, due to earlier optimism, we may hold only an inpcb
938 * lock, and not the inpcbinfo write lock. If so, we need to try to
939 * acquire it, or if that fails, acquire a reference on the inpcb,
940 * drop all locks, acquire a global write lock, and then re-acquire
941 * the inpcb lock. We may at that point discover that another thread
942 * has tried to free the inpcb, in which case we need to loop back
943 * and try to find a new inpcb to deliver to.
944 *
945 * XXXRW: It may be time to rethink timewait locking.
946 */
947relocked:
948 if (inp->inp_flags & INP_TIMEWAIT) {
949 if (ti_locked == TI_UNLOCKED) {
950 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
951 in_pcbref(inp);
952 INP_WUNLOCK(inp);
953 INP_INFO_WLOCK(&V_tcbinfo);
954 ti_locked = TI_WLOCKED;
955 INP_WLOCK(inp);
956 if (in_pcbrele_wlocked(inp)) {
957 inp = NULL;
958 goto findpcb;
959 }
960 } else
961 ti_locked = TI_WLOCKED;
962 }
963 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
964
965 if (thflags & TH_SYN)
966 tcp_dooptions(&to, optp, optlen, TO_SYN);
967 /*
968 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
969 */
970 if (tcp_twcheck(inp, &to, th, m, tlen))
971 goto findpcb;
972 INP_INFO_WUNLOCK(&V_tcbinfo);
973 return;
974 }
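	/*
	 * Editorial sketch of the lock-upgrade dance above, which is
	 * reused again below: try-lock the global pcbinfo first; if
	 * that fails, pin the inpcb with a reference so it cannot be
	 * freed, drop its lock to avoid a lock-order reversal, block
	 * on the global lock, relock the inpcb, and only then learn
	 * whether it died in the meantime:
	 *
	 *	if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
	 *		in_pcbref(inp);			pin the inpcb
	 *		INP_WUNLOCK(inp);		avoid a LOR
	 *		INP_INFO_WLOCK(&V_tcbinfo);	blocking acquire
	 *		INP_WLOCK(inp);
	 *		if (in_pcbrele_wlocked(inp))	inpcb was freed,
	 *			goto findpcb;		look it up again
	 *	}
	 */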
975 /*
976 * The TCPCB may no longer exist if the connection is winding
977 * down or it is in the CLOSED state. Either way we drop the
978 * segment and send an appropriate response.
979 */
980 tp = intotcpcb(inp);
981 if (tp == NULL || tp->t_state == TCPS_CLOSED) {
982 rstreason = BANDLIM_RST_CLOSEDPORT;
983 goto dropwithreset;
984 }
985
986#ifdef TCP_OFFLOAD
987 if (tp->t_flags & TF_TOE) {
988 tcp_offload_input(tp, m);
989 m = NULL; /* consumed by the TOE driver */
990 goto dropunlock;
991 }
992#endif
993
994 /*
995 * We've identified a valid inpcb, but it could be that we need an
996 * inpcbinfo write lock but don't hold it. In this case, attempt to
997 * acquire using the same strategy as the TIMEWAIT case above. If we
998 * relock, we have to jump back to 'relocked' as the connection might
999 * now be in TIMEWAIT.
1000 */
1001#ifdef INVARIANTS
1002 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0)
1003 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1004#endif
1005 if (tp->t_state != TCPS_ESTABLISHED) {
1006 if (ti_locked == TI_UNLOCKED) {
1007 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
1008 in_pcbref(inp);
1009 INP_WUNLOCK(inp);
1010 INP_INFO_WLOCK(&V_tcbinfo);
1011 ti_locked = TI_WLOCKED;
1012 INP_WLOCK(inp);
1013 if (in_pcbrele_wlocked(inp)) {
1014 inp = NULL;
1015 goto findpcb;
1016 }
1017 goto relocked;
1018 } else
1019 ti_locked = TI_WLOCKED;
1020 }
1021 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1022 }
1023
1024#ifdef MAC
1025 INP_WLOCK_ASSERT(inp);
1026 if (mac_inpcb_check_deliver(inp, m))
1027 goto dropunlock;
1028#endif
1029 so = inp->inp_socket;
1030 KASSERT(so != NULL, ("%s: so == NULL", __func__));
1031#ifdef TCPDEBUG
1032 if (so->so_options & SO_DEBUG) {
1033 ostate = tp->t_state;
1034#ifdef INET6
1035 if (isipv6) {
1036 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
1037 } else
1038#endif
1039 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1040 tcp_savetcp = *th;
1041 }
1042#endif /* TCPDEBUG */
1043 /*
1044 * When the socket is accepting connections (the INPCB is in LISTEN
1045 * state) we look into the SYN cache if this is a new connection
1046 * attempt or the completion of a previous one. Because listen
1047 * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be
1048 * held in this case.
1049 */
1050 if (so->so_options & SO_ACCEPTCONN) {
1051 struct in_conninfo inc;
1052
1053 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
1054 "tp not listening", __func__));
1055 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1056
1057 bzero(&inc, sizeof(inc));
1058#ifdef INET6
1059 if (isipv6) {
1060 inc.inc_flags |= INC_ISIPV6;
1061 inc.inc6_faddr = ip6->ip6_src;
1062 inc.inc6_laddr = ip6->ip6_dst;
1063 } else
1064#endif
1065 {
1066 inc.inc_faddr = ip->ip_src;
1067 inc.inc_laddr = ip->ip_dst;
1068 }
1069 inc.inc_fport = th->th_sport;
1070 inc.inc_lport = th->th_dport;
1071 inc.inc_fibnum = so->so_fibnum;
1072
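		/*
		 * Editorial roadmap of the listen-socket dispatch below,
		 * keyed purely on the segment's flag combination:
		 *
		 *	ACK only    -> syncache_expand(): handshake completion
		 *	RST         -> syncache_chkrst(): our SYN|ACK refused
		 *	no SYN      -> drop; nothing can be done without a SYN
		 *	SYN|ACK     -> RST; bogus on a listen socket
		 *	SYN [| FIN] -> syncache_add(): record the new attempt
		 */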
1073 /*
1074		 * Check for an existing connection attempt in the syncache
1075		 * if the only flag set is ACK. A successful lookup creates a new
1076 * socket appended to the listen queue in SYN_RECEIVED state.
1077 */
1078 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
1079 /*
1080 * Parse the TCP options here because
1081 * syncookies need access to the reflected
1082 * timestamp.
1083 */
1084 tcp_dooptions(&to, optp, optlen, 0);
1085 /*
1086			 * NB: syncache_expand() doesn't release the
1087			 * inp and tcbinfo locks.
1088 */
1089 if (!syncache_expand(&inc, &to, th, &so, m)) {
1090 /*
1091 * No syncache entry or ACK was not
1092 * for our SYN/ACK. Send a RST.
1093 * NB: syncache did its own logging
1094 * of the failure cause.
1095 */
1096 rstreason = BANDLIM_RST_OPENPORT;
1097 goto dropwithreset;
1098 }
1099 if (so == NULL) {
1100 /*
1101 * We completed the 3-way handshake
1102 * but could not allocate a socket
1103 * either due to memory shortage,
1104 * listen queue length limits or
1105 * global socket limits. Send RST
1106 * or wait and have the remote end
1107 * retransmit the ACK for another
1108 * try.
1109 */
1110 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1111 log(LOG_DEBUG, "%s; %s: Listen socket: "
1112 "Socket allocation failed due to "
1113 "limits or memory shortage, %s\n",
1114 s, __func__,
1115 V_tcp_sc_rst_sock_fail ?
1116 "sending RST" : "try again");
1117 if (V_tcp_sc_rst_sock_fail) {
1118 rstreason = BANDLIM_UNLIMITED;
1119 goto dropwithreset;
1120 } else
1121 goto dropunlock;
1122 }
1123 /*
1124 * Socket is created in state SYN_RECEIVED.
1125 * Unlock the listen socket, lock the newly
1126 * created socket and update the tp variable.
1127 */
1128 INP_WUNLOCK(inp); /* listen socket */
1129 inp = sotoinpcb(so);
1130 INP_WLOCK(inp); /* new connection */
1131 tp = intotcpcb(inp);
1132 KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
1133			    ("%s: new socket not in SYN_RECEIVED", __func__));
1134#ifdef TCP_SIGNATURE
1135 if (sig_checked == 0) {
1136 tcp_dooptions(&to, optp, optlen,
1137 (thflags & TH_SYN) ? TO_SYN : 0);
1138 if (!tcp_signature_verify_input(m, off0, tlen,
1139 optlen, &to, th, tp->t_flags)) {
1140
1141					/*
1142					 * An RST received while in SYN_SENT
1143					 * state is allowed further processing
1144					 * even though its signature check failed.
1145					 */
1146 if ((thflags & TH_RST) == 0 ||
1147 (tp->t_state == TCPS_SYN_SENT) == 0)
1148 goto dropunlock;
1149 }
1150 sig_checked = 1;
1151 }
1152#endif
1153
1154 /*
1155 * Process the segment and the data it
1156 * contains. tcp_do_segment() consumes
1157 * the mbuf chain and unlocks the inpcb.
1158 */
1159 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
1160 iptos, ti_locked);
1161 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1162 return;
1163 }
1164 /*
1165 * Segment flag validation for new connection attempts:
1166 *
1167 * Our (SYN|ACK) response was rejected.
1168 * Check with syncache and remove entry to prevent
1169 * retransmits.
1170 *
1171 * NB: syncache_chkrst does its own logging of failure
1172 * causes.
1173 */
1174 if (thflags & TH_RST) {
1175 syncache_chkrst(&inc, th);
1176 goto dropunlock;
1177 }
1178 /*
1179 * We can't do anything without SYN.
1180 */
1181 if ((thflags & TH_SYN) == 0) {
1182 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1183 log(LOG_DEBUG, "%s; %s: Listen socket: "
1184 "SYN is missing, segment ignored\n",
1185 s, __func__);
1186 TCPSTAT_INC(tcps_badsyn);
1187 goto dropunlock;
1188 }
1189 /*
1190 * (SYN|ACK) is bogus on a listen socket.
1191 */
1192 if (thflags & TH_ACK) {
1193 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1194 log(LOG_DEBUG, "%s; %s: Listen socket: "
1195 "SYN|ACK invalid, segment rejected\n",
1196 s, __func__);
1197 syncache_badack(&inc); /* XXX: Not needed! */
1198 TCPSTAT_INC(tcps_badsyn);
1199 rstreason = BANDLIM_RST_OPENPORT;
1200 goto dropwithreset;
1201 }
1202 /*
1203 * If the drop_synfin option is enabled, drop all
1204 * segments with both the SYN and FIN bits set.
1205 * This prevents e.g. nmap from identifying the
1206 * TCP/IP stack.
1207 * XXX: Poor reasoning. nmap has other methods
1208 * and is constantly refining its stack detection
1209 * strategies.
1210 * XXX: This is a violation of the TCP specification
1211 * and was used by RFC1644.
1212 */
1213 if ((thflags & TH_FIN) && V_drop_synfin) {
1214 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1215 log(LOG_DEBUG, "%s; %s: Listen socket: "
1216 "SYN|FIN segment ignored (based on "
1217 "sysctl setting)\n", s, __func__);
1218 TCPSTAT_INC(tcps_badsyn);
1219 goto dropunlock;
1220 }
1221 /*
1222 * Segment's flags are (SYN) or (SYN|FIN).
1223 *
1224 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
1225 * as they do not affect the state of the TCP FSM.
1226 * The data pointed to by TH_URG and th_urp is ignored.
1227 */
1228 KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
1229 ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
1230 KASSERT(thflags & (TH_SYN),
1231 ("%s: Listen socket: TH_SYN not set", __func__));
1232#ifdef INET6
1233 /*
1234 * If deprecated address is forbidden,
1235 * we do not accept SYN to deprecated interface
1236 * address to prevent any new inbound connection from
1237 * getting established.
1238		 * When we do not accept the SYN, we send a TCP RST
1239		 * with the deprecated address as its source (instead of
1240		 * dropping the segment silently). This is a deliberate
1241		 * compromise: it is much better for the peer to receive a
1242		 * RST, as the RST will be the final packet of the exchange.
1243 *
1244 * If we do not forbid deprecated addresses, we accept
1245 * the SYN packet. RFC2462 does not suggest dropping
1246 * SYN in this case.
1247		 * Our reading of RFC2462 5.5.4 is as follows:
1248 * 1. use of deprecated addr with existing
1249 * communication is okay - "SHOULD continue to be
1250 * used"
1251 * 2. use of it with new communication:
1252 * (2a) "SHOULD NOT be used if alternate address
1253 * with sufficient scope is available"
1254 * (2b) nothing mentioned otherwise.
1255 * Here we fall into (2b) case as we have no choice in
1256 * our source address selection - we must obey the peer.
1257 *
1258 * The wording in RFC2462 is confusing, and there are
1259		 * multiple descriptions of deprecated address
1260		 * handling - worse, they do not exactly agree.
1261 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1262 */
1263 if (isipv6 && !V_ip6_use_deprecated) {
1264 struct in6_ifaddr *ia6;
1265
1266 ia6 = ip6_getdstifaddr(m);
1267 if (ia6 != NULL &&
1268 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
1269 ifa_free(&ia6->ia_ifa);
1270 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1271 log(LOG_DEBUG, "%s; %s: Listen socket: "
1272 "Connection attempt to deprecated "
1273 "IPv6 address rejected\n",
1274 s, __func__);
1275 rstreason = BANDLIM_RST_OPENPORT;
1276 goto dropwithreset;
1277 }
1278 if (ia6)
1279 ifa_free(&ia6->ia_ifa);
1280 }
1281#endif /* INET6 */
1282 /*
1283 * Basic sanity checks on incoming SYN requests:
1284 * Don't respond if the destination is a link layer
1285 * broadcast according to RFC1122 4.2.3.10, p. 104.
1286 * If it is from this socket it must be forged.
1287 * Don't respond if the source or destination is a
1288 * global or subnet broad- or multicast address.
1289 * Note that it is quite possible to receive unicast
1290 * link-layer packets with a broadcast IP address. Use
1291 * in_broadcast() to find them.
1292 */
1293 if (m->m_flags & (M_BCAST|M_MCAST)) {
1294 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1295 log(LOG_DEBUG, "%s; %s: Listen socket: "
1296 "Connection attempt from broad- or multicast "
1297 "link layer address ignored\n", s, __func__);
1298 goto dropunlock;
1299 }
1300#ifdef INET6
1301 if (isipv6) {
1302 if (th->th_dport == th->th_sport &&
1303 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
1304 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1305 log(LOG_DEBUG, "%s; %s: Listen socket: "
1306 "Connection attempt to/from self "
1307 "ignored\n", s, __func__);
1308 goto dropunlock;
1309 }
1310 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1311 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
1312 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1313 log(LOG_DEBUG, "%s; %s: Listen socket: "
1314 "Connection attempt from/to multicast "
1315 "address ignored\n", s, __func__);
1316 goto dropunlock;
1317 }
1318 }
1319#endif
1320#if defined(INET) && defined(INET6)
1321 else
1322#endif
1323#ifdef INET
1324 {
1325 if (th->th_dport == th->th_sport &&
1326 ip->ip_dst.s_addr == ip->ip_src.s_addr) {
1327 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1328 log(LOG_DEBUG, "%s; %s: Listen socket: "
1329 "Connection attempt from/to self "
1330 "ignored\n", s, __func__);
1331 goto dropunlock;
1332 }
1333 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1334 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1335 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1336 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
1337 if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
1338 log(LOG_DEBUG, "%s; %s: Listen socket: "
1339 "Connection attempt from/to broad- "
1340 "or multicast address ignored\n",
1341 s, __func__);
1342 goto dropunlock;
1343 }
1344 }
1345#endif
1346 /*
1347 * SYN appears to be valid. Create compressed TCP state
1348 * for syncache.
1349 */
1350#ifdef TCPDEBUG
1351 if (so->so_options & SO_DEBUG)
1352 tcp_trace(TA_INPUT, ostate, tp,
1353 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1354#endif
1355 tcp_dooptions(&to, optp, optlen, TO_SYN);
1356 syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
1357 /*
1358 * Entry added to syncache and mbuf consumed.
1359 * Everything already unlocked by syncache_add().
1360 */
1361 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1362 return;
1363 } else if (tp->t_state == TCPS_LISTEN) {
1364 /*
1365 * When a listen socket is torn down the SO_ACCEPTCONN
1366 * flag is removed first while connections are drained
1367		 * from the accept queue in an unlock/lock cycle of the
1368		 * ACCEPT_LOCK, opening a race window that allows a SYN
1369		 * attempt to go through unhandled.
1370 */
1371 goto dropunlock;
1372 }
1373
1374#ifdef TCP_SIGNATURE
1375 if (sig_checked == 0) {
1376 tcp_dooptions(&to, optp, optlen,
1377 (thflags & TH_SYN) ? TO_SYN : 0);
1378 if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
1379 th, tp->t_flags)) {
1380
1381 /*
1382			 * An RST received while in SYN_SENT state is
1383			 * still allowed further processing.
1384 */
1385 if ((thflags & TH_RST) == 0 ||
1386 (tp->t_state == TCPS_SYN_SENT) == 0)
1387 goto dropunlock;
1388 }
1389 sig_checked = 1;
1390 }
1391#endif
1392
1393 TCP_PROBE5(receive, NULL, tp, m->m_data, tp, th);
1394
1395 /*
1396 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
1397 * state. tcp_do_segment() always consumes the mbuf chain, unlocks
1398 * the inpcb, and unlocks pcbinfo.
1399 */
1400 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
1401 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1402 return;
1403
1404dropwithreset:
1405 TCP_PROBE5(receive, NULL, tp, m->m_data, tp, th);
1406
1407 if (ti_locked == TI_WLOCKED) {
1408 INP_INFO_WUNLOCK(&V_tcbinfo);
1409 ti_locked = TI_UNLOCKED;
1410 }
1411#ifdef INVARIANTS
1412 else {
1413 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
1414 "ti_locked: %d", __func__, ti_locked));
1415 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1416 }
1417#endif
1418
1419 if (inp != NULL) {
1420 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1421 INP_WUNLOCK(inp);
1422 } else
1423 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1424 m = NULL; /* mbuf chain got consumed. */
1425 goto drop;
1426
1427dropunlock:
1428 if (m != NULL)
1429 TCP_PROBE5(receive, NULL, tp, m->m_data, tp, th);
1430
1431 if (ti_locked == TI_WLOCKED) {
1432 INP_INFO_WUNLOCK(&V_tcbinfo);
1433 ti_locked = TI_UNLOCKED;
1434 }
1435#ifdef INVARIANTS
1436 else {
1437 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
1438 "ti_locked: %d", __func__, ti_locked));
1439 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1440 }
1441#endif
1442
1443 if (inp != NULL)
1444 INP_WUNLOCK(inp);
1445
1446drop:
1447 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1448 if (s != NULL)
1449 free(s, M_TCPLOG);
1450 if (m != NULL)
1451 m_freem(m);
1452}
1453
1454static void
1455tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
1456 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
1457 int ti_locked)
1458{
1459 int thflags, acked, ourfinisacked, needoutput = 0;
1460 int rstreason, todrop, win;
1461 u_long tiwin;
1462 char *s;
1463 struct in_conninfo *inc;
1464 struct tcpopt to;
1465
1466#ifdef TCPDEBUG
1467 /*
1468 * The size of tcp_saveipgen must be the size of the max ip header,
1469 * now IPv6.
1470 */
1471 u_char tcp_saveipgen[IP6_HDR_LEN];
1472 struct tcphdr tcp_savetcp;
1473 short ostate = 0;
1474#endif
1475 thflags = th->th_flags;
1476 inc = &tp->t_inpcb->inp_inc;
1477 tp->sackhint.last_sack_ack = 0;
1478
1479 /*
1480 * If this is either a state-changing packet or current state isn't
1481 * established, we require a write lock on tcbinfo. Otherwise, we
1482	 * allow the tcbinfo to be in either a locked or unlocked state, as the
1483 * caller may have unnecessarily acquired a write lock due to a race.
1484 */
1485 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
1486 tp->t_state != TCPS_ESTABLISHED) {
1487 KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
1488 "SYN/FIN/RST/!EST", __func__, ti_locked));
1489 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1490 } else {
1491#ifdef INVARIANTS
1492 if (ti_locked == TI_WLOCKED)
1493 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1494 else {
1495 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
1496 "ti_locked: %d", __func__, ti_locked));
1497 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1498 }
1499#endif
1500 }
1501 INP_WLOCK_ASSERT(tp->t_inpcb);
1502 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
1503 __func__));
1504 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
1505 __func__));
1506
1507 /*
1508 * Segment received on connection.
1509 * Reset idle time and keep-alive timer.
1510 * XXX: This should be done after segment
1511 * validation to ignore broken/spoofed segs.
1512 */
1513 tp->t_rcvtime = ticks;
1514 if (TCPS_HAVEESTABLISHED(tp->t_state))
1515 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
1516
1517 /*
1518 * Unscale the window into a 32-bit value.
1519 * For the SYN_SENT state the scale is zero.
1520 */
1521 tiwin = th->th_win << tp->snd_scale;
1522
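	/*
	 * Worked example (editorial, not from the original source):
	 * with a negotiated snd_scale of 7, an on-the-wire th_win of
	 * 0xffff advertises 65535 << 7 == 8388480 bytes, which is why
	 * the unscaled window is kept in the 32-bit tiwin rather than
	 * the 16-bit header field.
	 */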
1523 /*
1524 * TCP ECN processing.
1525 */
1526 if (tp->t_flags & TF_ECN_PERMIT) {
1527 if (thflags & TH_CWR)
1528 tp->t_flags &= ~TF_ECN_SND_ECE;
1529 switch (iptos & IPTOS_ECN_MASK) {
1530 case IPTOS_ECN_CE:
1531 tp->t_flags |= TF_ECN_SND_ECE;
1532 TCPSTAT_INC(tcps_ecn_ce);
1533 break;
1534 case IPTOS_ECN_ECT0:
1535 TCPSTAT_INC(tcps_ecn_ect0);
1536 break;
1537 case IPTOS_ECN_ECT1:
1538 TCPSTAT_INC(tcps_ecn_ect1);
1539 break;
1540 }
1541 /* Congestion experienced. */
1542 if (thflags & TH_ECE) {
1543 cc_cong_signal(tp, th, CC_ECN);
1544 }
1545 }
1546
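	/*
	 * Editorial reference for the codepoints tested above, carried
	 * in the two low-order bits of the TOS/traffic class byte
	 * (RFC 3168):
	 *
	 *	00 Not-ECT  not ECN-capable
	 *	01 ECT(1)   ECN-capable transport
	 *	10 ECT(0)   ECN-capable transport
	 *	11 CE       congestion experienced; echo ECE to the peer
	 */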
1547 /*
1548 * Parse options on any incoming segment.
1549 */
1550 tcp_dooptions(&to, (u_char *)(th + 1),
1551 (th->th_off << 2) - sizeof(struct tcphdr),
1552 (thflags & TH_SYN) ? TO_SYN : 0);
1553
1554 /*
1555 * If echoed timestamp is later than the current time,
1556	 * fall back to non-RFC1323 RTT calculation. Normalize
1557 * timestamp if syncookies were used when this connection
1558 * was established.
1559 */
1560 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
1561 to.to_tsecr -= tp->ts_offset;
1562 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
1563 to.to_tsecr = 0;
1564 }
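	/*
	 * Editorial note on the normalization above: with syncookies,
	 * ts_offset is the per-connection offset that was added to our
	 * timestamps, so the peer echoes tsecr == our_tsval + ts_offset
	 * and subtracting ts_offset recovers our clock. An echo still
	 * ahead of tcp_ts_getticks() is bogus, and zeroing to_tsecr
	 * makes the RTT code below fall back to the t_rtttime-based
	 * sample.
	 */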
1565 /*
1566 * If timestamps were negotiated during SYN/ACK they should
1567 * appear on every segment during this session and vice versa.
1568 */
1569 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
1570 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1571 log(LOG_DEBUG, "%s; %s: Timestamp missing, "
1572 "no action\n", s, __func__);
1573 free(s, M_TCPLOG);
1574 }
1575 }
1576 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
1577 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
1578 log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
1579 "no action\n", s, __func__);
1580 free(s, M_TCPLOG);
1581 }
1582 }
1583
1584 /*
1585 * Process options only when we get SYN/ACK back. The SYN case
1586 * for incoming connections is handled in tcp_syncache.
1587 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
1588 * or <SYN,ACK>) segment itself is never scaled.
1589 * XXX this is traditional behavior, may need to be cleaned up.
1590 */
1591 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
1592 if ((to.to_flags & TOF_SCALE) &&
1593 (tp->t_flags & TF_REQ_SCALE)) {
1594 tp->t_flags |= TF_RCVD_SCALE;
1595 tp->snd_scale = to.to_wscale;
1596 }
1597 /*
1598 * Initial send window. It will be updated with
1599 * the next incoming segment to the scaled value.
1600 */
1601 tp->snd_wnd = th->th_win;
1602 if (to.to_flags & TOF_TS) {
1603 tp->t_flags |= TF_RCVD_TSTMP;
1604 tp->ts_recent = to.to_tsval;
1605 tp->ts_recent_age = tcp_ts_getticks();
1606 }
1607 if (to.to_flags & TOF_MSS)
1608 tcp_mss(tp, to.to_mss);
1609 if ((tp->t_flags & TF_SACK_PERMIT) &&
1610 (to.to_flags & TOF_SACKPERM) == 0)
1611 tp->t_flags &= ~TF_SACK_PERMIT;
1612 }
1613
1614 /*
1615 * Header prediction: check for the two common cases
1616 * of a uni-directional data xfer. If the packet has
1617 * no control flags, is in-sequence, the window didn't
1618 * change and we're not retransmitting, it's a
1619 * candidate. If the length is zero and the ack moved
1620 * forward, we're the sender side of the xfer. Just
1621 * free the data acked & wake any higher level process
1622 * that was blocked waiting for space. If the length
1623 * is non-zero and the ack didn't move, we're the
1624 * receiver side. If we're getting packets in-order
1625 * (the reassembly queue is empty), add the data to
1626 * the socket buffer and note that we need a delayed ack.
1627 * Make sure that the hidden state-flags are also off.
1628 * Since we check for TCPS_ESTABLISHED first, it can only
1629 * be TH_NEEDSYN.
1630 */
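	/*
	 * The fast-path predicate below, restated as an editorial
	 * sketch:
	 *
	 *	fastpath = state == ESTABLISHED &&
	 *	    th_seq == rcv_nxt && flags == TH_ACK &&
	 *	    snd_nxt == snd_max &&		not retransmitting
	 *	    tiwin != 0 && tiwin == snd_wnd &&	window unchanged
	 *	    reassembly queue empty &&
	 *	    (no timestamp || tsval >= ts_recent);
	 */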
1631 if (tp->t_state == TCPS_ESTABLISHED &&
1632 th->th_seq == tp->rcv_nxt &&
1633 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
1634 tp->snd_nxt == tp->snd_max &&
1635 tiwin && tiwin == tp->snd_wnd &&
1636 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
1637 LIST_EMPTY(&tp->t_segq) &&
1638 ((to.to_flags & TOF_TS) == 0 ||
1639 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
1640
1641 /*
1642 * If last ACK falls within this segment's sequence numbers,
1643 * record the timestamp.
1644 * NOTE that the test is modified according to the latest
1645 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
1646 */
1647 if ((to.to_flags & TOF_TS) != 0 &&
1648 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
1649 tp->ts_recent_age = tcp_ts_getticks();
1650 tp->ts_recent = to.to_tsval;
1651 }
1652
1653 if (tlen == 0) {
1654 if (SEQ_GT(th->th_ack, tp->snd_una) &&
1655 SEQ_LEQ(th->th_ack, tp->snd_max) &&
1656 !IN_RECOVERY(tp->t_flags) &&
1657 (to.to_flags & TOF_SACK) == 0 &&
1658 TAILQ_EMPTY(&tp->snd_holes)) {
1659 /*
1660 * This is a pure ack for outstanding data.
1661 */
1662 if (ti_locked == TI_WLOCKED)
1663 INP_INFO_WUNLOCK(&V_tcbinfo);
1664 ti_locked = TI_UNLOCKED;
1665
1666 TCPSTAT_INC(tcps_predack);
1667
1668 /*
1669 * "bad retransmit" recovery.
1670 */
1671 if (tp->t_rxtshift == 1 &&
1672 tp->t_flags & TF_PREVVALID &&
1673 (int)(ticks - tp->t_badrxtwin) < 0) {
1674 cc_cong_signal(tp, th, CC_RTO_ERR);
1675 }
1676
1677 /*
1678 * Recalculate the transmit timer / rtt.
1679 *
1680 * Some boxes send broken timestamp replies
1681			 * during the SYN+ACK phase; ignore
1682			 * timestamps of 0, or we could calculate a
1683 * huge RTT and blow up the retransmit timer.
1684 */
1685 if ((to.to_flags & TOF_TS) != 0 &&
1686 to.to_tsecr) {
1687 u_int t;
1688
1689 t = tcp_ts_getticks() - to.to_tsecr;
1690 if (!tp->t_rttlow || tp->t_rttlow > t)
1691 tp->t_rttlow = t;
1692 tcp_xmit_timer(tp,
1693 TCP_TS_TO_TICKS(t) + 1);
1694 } else if (tp->t_rtttime &&
1695 SEQ_GT(th->th_ack, tp->t_rtseq)) {
1696 if (!tp->t_rttlow ||
1697 tp->t_rttlow > ticks - tp->t_rtttime)
1698 tp->t_rttlow = ticks - tp->t_rtttime;
1699 tcp_xmit_timer(tp,
1700 ticks - tp->t_rtttime);
1701 }
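				/*
				 * Worked example of the timestamp path above
				 * (editorial; assumes the common 1 ms
				 * timestamp granularity of tcp_ts_getticks()):
				 * tsecr == 41000 echoed back when
				 * tcp_ts_getticks() == 41042 gives t == 42,
				 * i.e. a ~42 ms RTT sample handed to
				 * tcp_xmit_timer() as TCP_TS_TO_TICKS(42) + 1.
				 */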
1702 acked = BYTES_THIS_ACK(tp, th);
1703
1704 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
1705 hhook_run_tcp_est_in(tp, th, &to);
1706
1707 TCPSTAT_INC(tcps_rcvackpack);
1708 TCPSTAT_ADD(tcps_rcvackbyte, acked);
1709 sbdrop(&so->so_snd, acked);
1710 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
1711 SEQ_LEQ(th->th_ack, tp->snd_recover))
1712 tp->snd_recover = th->th_ack - 1;
1713
1714 /*
1715 * Let the congestion control algorithm update
1716 * congestion control related information. This
1717 * typically means increasing the congestion
1718 * window.
1719 */
1720 cc_ack_received(tp, th, CC_ACK);
1721
1722 tp->snd_una = th->th_ack;
1723 /*
1724 * Pull snd_wl2 up to prevent seq wrap relative
1725 * to th_ack.
1726 */
1727 tp->snd_wl2 = th->th_ack;
1728 tp->t_dupacks = 0;
1729 m_freem(m);
1730 ND6_HINT(tp); /* Some progress has been made. */
1731
1732 /*
1733 * If all outstanding data are acked, stop
1734 * retransmit timer, otherwise restart timer
1735 * using current (possibly backed-off) value.
1736 * If process is waiting for space,
1737 * wakeup/selwakeup/signal. If data
1738 * are ready to send, let tcp_output
1739 * decide between more output or persist.
1740 */
1741#ifdef TCPDEBUG
1742 if (so->so_options & SO_DEBUG)
1743 tcp_trace(TA_INPUT, ostate, tp,
1744 (void *)tcp_saveipgen,
1745 &tcp_savetcp, 0);
1746#endif
1747 if (tp->snd_una == tp->snd_max)
1748 tcp_timer_activate(tp, TT_REXMT, 0);
1749 else if (!tcp_timer_active(tp, TT_PERSIST))
1750 tcp_timer_activate(tp, TT_REXMT,
1751 tp->t_rxtcur);
1752 sowwakeup(so);
1753 if (so->so_snd.sb_cc)
1754 (void) tcp_output(tp);
1755 goto check_delack;
1756 }
1757 } else if (th->th_ack == tp->snd_una &&
1758 tlen <= sbspace(&so->so_rcv)) {
1759 int newsize = 0; /* automatic sockbuf scaling */
1760
1761 /*
1762 * This is a pure, in-sequence data packet with
1763 * nothing on the reassembly queue and we have enough
1764 * buffer space to take it.
1765 */
1766 if (ti_locked == TI_WLOCKED)
1767 INP_INFO_WUNLOCK(&V_tcbinfo);
1768 ti_locked = TI_UNLOCKED;
1769
1770 /* Clean receiver SACK report if present */
1771 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
1772 tcp_clean_sackreport(tp);
1773 TCPSTAT_INC(tcps_preddat);
1774 tp->rcv_nxt += tlen;
1775 /*
1776 * Pull snd_wl1 up to prevent seq wrap relative to
1777 * th_seq.
1778 */
1779 tp->snd_wl1 = th->th_seq;
1780 /*
1781 * Pull rcv_up up to prevent seq wrap relative to
1782 * rcv_nxt.
1783 */
1784 tp->rcv_up = tp->rcv_nxt;
1785 TCPSTAT_INC(tcps_rcvpack);
1786 TCPSTAT_ADD(tcps_rcvbyte, tlen);
1787 ND6_HINT(tp); /* Some progress has been made */
1788#ifdef TCPDEBUG
1789 if (so->so_options & SO_DEBUG)
1790 tcp_trace(TA_INPUT, ostate, tp,
1791 (void *)tcp_saveipgen, &tcp_savetcp, 0);
1792#endif
1793 /*
1794 * Automatic sizing of receive socket buffer. Often the send
1795 * buffer size is not optimally adjusted to the actual network
1796 * conditions at hand (delay bandwidth product). Setting the
1797 * buffer size too small limits throughput on links with high
1798	 * bandwidth and high delay (e.g. trans-continental/oceanic links).
1799 *
1800 * On the receive side the socket buffer memory is only rarely
1801 * used to any significant extent. This allows us to be much
1802 * more aggressive in scaling the receive socket buffer. For
1803 * the case that the buffer space is actually used to a large
1804 * extent and we run out of kernel memory we can simply drop
1805	 * the new segments; TCP on the sender will just retransmit them
1806 * later. Setting the buffer size too big may only consume too
1807 * much kernel memory if the application doesn't read() from
1808 * the socket or packet loss or reordering makes use of the
1809 * reassembly queue.
1810 *
1811 * The criteria to step up the receive buffer one notch are:
1812 * 1. the number of bytes received during the time it takes
1813 * one timestamp to be reflected back to us (the RTT);
1814	 *    2. received bytes per RTT is within seven eighths of the
1815 * current socket buffer size;
1816 * 3. receive buffer size has not hit maximal automatic size;
1817 *
1818 * This algorithm does one step per RTT at most and only if
1819 * we receive a bulk stream w/o packet losses or reorderings.
1820 * Shrinking the buffer during idle times is not necessary as
1821 * it doesn't consume any memory when idle.
1822 *
1823 * TODO: Only step up if the application is actually serving
1824 * the buffer to better manage the socket buffer resources.
1825 */
1826 if (V_tcp_do_autorcvbuf &&
1827 to.to_tsecr &&
1828 (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
1829 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
1830 to.to_tsecr - tp->rfbuf_ts < hz) {
1831 if (tp->rfbuf_cnt >
1832 (so->so_rcv.sb_hiwat / 8 * 7) &&
1833 so->so_rcv.sb_hiwat <
1834 V_tcp_autorcvbuf_max) {
1835 newsize =
1836 min(so->so_rcv.sb_hiwat +
1837 V_tcp_autorcvbuf_inc,
1838 V_tcp_autorcvbuf_max);
1839 }
1840 /* Start over with next RTT. */
1841 tp->rfbuf_ts = 0;
1842 tp->rfbuf_cnt = 0;
1843 } else
1844 tp->rfbuf_cnt += tlen; /* add up */
1845 }
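/*
 * Editor's sketch, not part of tcp_input.c: the buffer step-up rule
 * above restated as a pure function.  "rfbuf_cnt" is the byte count
 * observed over the last RTT; the function and parameter names are
 * illustrative.
 */
static unsigned long
autorcvbuf_step(unsigned long rfbuf_cnt, unsigned long hiwat,
    unsigned long inc, unsigned long max)
{
	/*
	 * Step up only if a full RTT's worth of data came within
	 * seven eighths of the current buffer and the buffer is
	 * still below the automatic maximum.
	 */
	if (rfbuf_cnt > (hiwat / 8) * 7 && hiwat < max)
		return (hiwat + inc < max ? hiwat + inc : max);
	return (hiwat);
}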
1846
1847 /* Add data to socket buffer. */
1848 SOCKBUF_LOCK(&so->so_rcv);
1849 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1850 m_freem(m);
1851 } else {
1852 /*
1853 * Set new socket buffer size.
1854 * Give up when limit is reached.
1855 */
1856 if (newsize)
1857 if (!sbreserve_locked(&so->so_rcv,
1858 newsize, so, NULL))
1859 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
1860 m_adj(m, drop_hdrlen); /* delayed header drop */
1861 sbappendstream_locked(&so->so_rcv, m);
1862 }
1863 /* NB: sorwakeup_locked() does an implicit unlock. */
1864 sorwakeup_locked(so);
1865 if (DELAY_ACK(tp)) {
1866 tp->t_flags |= TF_DELACK;
1867 } else {
1868 tp->t_flags |= TF_ACKNOW;
1869 tcp_output(tp);
1870 }
1871 goto check_delack;
1872 }
1873 }
1874
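/*
 * Editor's sketch, not part of tcp_input.c: the header-prediction
 * test above restated as a predicate.  Argument names mirror the
 * tcpcb/tcphdr fields used in this file; the function itself is
 * illustrative.
 */
#include <stdint.h>

static int
fastpath_candidate(int established, uint32_t th_seq, uint32_t rcv_nxt,
    int only_ack_set, uint32_t snd_nxt, uint32_t snd_max,
    uint32_t tiwin, uint32_t snd_wnd, int reass_empty, int ts_fresh)
{
	return (established &&
	    th_seq == rcv_nxt &&		/* in sequence */
	    only_ack_set &&			/* no SYN/FIN/RST/URG */
	    snd_nxt == snd_max &&		/* not retransmitting */
	    tiwin != 0 && tiwin == snd_wnd &&	/* window unchanged */
	    reass_empty &&			/* no out-of-order data */
	    ts_fresh);				/* timestamp, if any, not old */
}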
1875 /*
1876 * Calculate amount of space in receive window,
1877 * and then do TCP input processing.
1878 * Receive window is amount of space in rcv queue,
1879 * but not less than advertised window.
1880 */
1881 win = sbspace(&so->so_rcv);
1882 if (win < 0)
1883 win = 0;
1884 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
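/*
 * Editor's sketch, not part of tcp_input.c: the computation above
 * keeps the offered window from shrinking below what has already been
 * advertised (rcv_adv - rcv_nxt), even when the socket buffer
 * momentarily has less free space.  The function name is illustrative.
 */
#include <stdint.h>

static int
receive_window(int space, uint32_t rcv_adv, uint32_t rcv_nxt)
{
	int win = (space > 0) ? space : 0;
	int advertised = (int)(rcv_adv - rcv_nxt);

	return (win > advertised ? win : advertised);
}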
1885
1886 /* Reset receive buffer auto scaling when not in bulk receive mode. */
1887 tp->rfbuf_ts = 0;
1888 tp->rfbuf_cnt = 0;
1889
1890 switch (tp->t_state) {
1891
1892 /*
1893 * If the state is SYN_RECEIVED:
1894 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
1895 */
1896 case TCPS_SYN_RECEIVED:
1897 if ((thflags & TH_ACK) &&
1898 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
1899 SEQ_GT(th->th_ack, tp->snd_max))) {
1900 rstreason = BANDLIM_RST_OPENPORT;
1901 goto dropwithreset;
1902 }
1903 break;
1904
1905 /*
1906 * If the state is SYN_SENT:
1907 * if seg contains an ACK, but not for our SYN, drop the input.
1908 * if seg contains a RST, then drop the connection.
1909 * if seg does not contain SYN, then drop it.
1910 * Otherwise this is an acceptable SYN segment
1911 * initialize tp->rcv_nxt and tp->irs
1912 * if seg contains ack then advance tp->snd_una
1913 * if seg contains an ECE and ECN support is enabled, the stream
1914 * is ECN capable.
1915 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
1916 * arrange for segment to be acked (eventually)
1917 * continue processing rest of data/controls, beginning with URG
1918 */
1919 case TCPS_SYN_SENT:
1920 if ((thflags & TH_ACK) &&
1921 (SEQ_LEQ(th->th_ack, tp->iss) ||
1922 SEQ_GT(th->th_ack, tp->snd_max))) {
1923 rstreason = BANDLIM_UNLIMITED;
1924 goto dropwithreset;
1925 }
1913 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
1926 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) {
1927 TCP_PROBE5(connect_refused, NULL, tp, m->m_data, tp,
1928 th);
1914 tp = tcp_drop(tp, ECONNREFUSED);
1929 tp = tcp_drop(tp, ECONNREFUSED);
1930 }
1915 if (thflags & TH_RST)
1916 goto drop;
1917 if (!(thflags & TH_SYN))
1918 goto drop;
1919
1920 tp->irs = th->th_seq;
1921 tcp_rcvseqinit(tp);
1922 if (thflags & TH_ACK) {
1923 TCPSTAT_INC(tcps_connects);
1924 soisconnected(so);
1925#ifdef MAC
1926 mac_socketpeer_set_from_mbuf(m, so);
1927#endif
1928 /* Do window scaling on this connection? */
1929 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
1930 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
1931 tp->rcv_scale = tp->request_r_scale;
1932 }
1933 tp->rcv_adv += imin(tp->rcv_wnd,
1934 TCP_MAXWIN << tp->rcv_scale);
1935 tp->snd_una++; /* SYN is acked */
1936 /*
1937	 * If there's data, delay ACK; if there's also a FIN,
1938 * ACKNOW will be turned on later.
1939 */
1940 if (DELAY_ACK(tp) && tlen != 0)
1941 tcp_timer_activate(tp, TT_DELACK,
1942 tcp_delacktime);
1943 else
1944 tp->t_flags |= TF_ACKNOW;
1945
1946 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
1947 tp->t_flags |= TF_ECN_PERMIT;
1948 TCPSTAT_INC(tcps_ecn_shs);
1949 }
1950
1951 /*
1952 * Received <SYN,ACK> in SYN_SENT[*] state.
1953 * Transitions:
1954 * SYN_SENT --> ESTABLISHED
1955 * SYN_SENT* --> FIN_WAIT_1
1956 */
1957 tp->t_starttime = ticks;
1958 if (tp->t_flags & TF_NEEDFIN) {
1959 tp->t_state = TCPS_FIN_WAIT_1;
1975 tcp_state_change(tp, TCPS_FIN_WAIT_1);
1960 tp->t_flags &= ~TF_NEEDFIN;
1961 thflags &= ~TH_SYN;
1962 } else {
1963 tp->t_state = TCPS_ESTABLISHED;
1979 tcp_state_change(tp, TCPS_ESTABLISHED);
1980 TCP_PROBE5(connect_established, NULL, tp,
1981 m->m_data, tp, th);
1964 cc_conn_init(tp);
1965 tcp_timer_activate(tp, TT_KEEP,
1966 TP_KEEPIDLE(tp));
1967 }
1968 } else {
1969 /*
1970 * Received initial SYN in SYN-SENT[*] state =>
1971 * simultaneous open. If segment contains CC option
1972 * and there is a cached CC, apply TAO test.
1973	 * If it succeeds, connection is half-synchronized.
1974 * Otherwise, do 3-way handshake:
1975 * SYN-SENT -> SYN-RECEIVED
1976 * SYN-SENT* -> SYN-RECEIVED*
1977 * If there was no CC option, clear cached CC value.
1978 */
1979 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
1980 tcp_timer_activate(tp, TT_REXMT, 0);
1981 tp->t_state = TCPS_SYN_RECEIVED;
1999 tcp_state_change(tp, TCPS_SYN_RECEIVED);
1982 }
1983
1984 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
1985 "ti_locked %d", __func__, ti_locked));
1986 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
1987 INP_WLOCK_ASSERT(tp->t_inpcb);
1988
1989 /*
1990 * Advance th->th_seq to correspond to first data byte.
1991 * If data, trim to stay within window,
1992 * dropping FIN if necessary.
1993 */
1994 th->th_seq++;
1995 if (tlen > tp->rcv_wnd) {
1996 todrop = tlen - tp->rcv_wnd;
1997 m_adj(m, -todrop);
1998 tlen = tp->rcv_wnd;
1999 thflags &= ~TH_FIN;
2000 TCPSTAT_INC(tcps_rcvpackafterwin);
2001 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2002 }
2003 tp->snd_wl1 = th->th_seq - 1;
2004 tp->rcv_up = th->th_seq;
2005 /*
2006 * Client side of transaction: already sent SYN and data.
2007 * If the remote host used T/TCP to validate the SYN,
2008 * our data will be ACK'd; if so, enter normal data segment
2009 * processing in the middle of step 5, ack processing.
2010 * Otherwise, goto step 6.
2011 */
2012 if (thflags & TH_ACK)
2013 goto process_ACK;
2014
2015 goto step6;
2016
2017 /*
2018 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2019 * do normal processing.
2020 *
2021 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2022 */
2023 case TCPS_LAST_ACK:
2024 case TCPS_CLOSING:
2025 break; /* continue normal processing */
2026 }
2027
2028 /*
2029 * States other than LISTEN or SYN_SENT.
2030 * First check the RST flag and sequence number since reset segments
2031 * are exempt from the timestamp and connection count tests. This
2032 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2033 * below which allowed reset segments in half the sequence space
2034	 * to fall through and be processed (which gives forged reset
2035 * segments with a random sequence number a 50 percent chance of
2036 * killing a connection).
2037 * Then check timestamp, if present.
2038 * Then check the connection count, if present.
2039 * Then check that at least some bytes of segment are within
2040 * receive window. If segment begins before rcv_nxt,
2041 * drop leading data (and SYN); if nothing left, just ack.
2042 *
2043 *
2044 * If the RST bit is set, check the sequence number to see
2045 * if this is a valid reset segment.
2046 * RFC 793 page 37:
2047 * In all states except SYN-SENT, all reset (RST) segments
2048 * are validated by checking their SEQ-fields. A reset is
2049 * valid if its sequence number is in the window.
2050 * Note: this does not take into account delayed ACKs, so
2051 * we should test against last_ack_sent instead of rcv_nxt.
2052 * The sequence number in the reset segment is normally an
2053	 * echo of our outgoing acknowledgement numbers, but some hosts
2054 * send a reset with the sequence number at the rightmost edge
2055 * of our receive window, and we have to handle this case.
2056 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
2057 * that brute force RST attacks are possible. To combat this,
2058 * we use a much stricter check while in the ESTABLISHED state,
2059 * only accepting RSTs where the sequence number is equal to
2060 * last_ack_sent. In all other states (the states in which a
2061 * RST is more likely), the more permissive check is used.
2062 * If we have multiple segments in flight, the initial reset
2063 * segment sequence numbers will be to the left of last_ack_sent,
2064 * but they will eventually catch up.
2065 * In any case, it never made sense to trim reset segments to
2066 * fit the receive window since RFC 1122 says:
2067 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
2068 *
2069 * A TCP SHOULD allow a received RST segment to include data.
2070 *
2071 * DISCUSSION
2072 * It has been suggested that a RST segment could contain
2073 * ASCII text that encoded and explained the cause of the
2074 * RST. No standard has yet been established for such
2075 * data.
2076 *
2077 * If the reset segment passes the sequence number test examine
2078 * the state:
2079 * SYN_RECEIVED STATE:
2080 * If passive open, return to LISTEN state.
2081 * If active open, inform user that connection was refused.
2082 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
2083 * Inform user that connection was reset, and close tcb.
2084 * CLOSING, LAST_ACK STATES:
2085 * Close the tcb.
2086 * TIME_WAIT STATE:
2087 * Drop the segment - see Stevens, vol. 2, p. 964 and
2088 * RFC 1337.
2089 */
2090 if (thflags & TH_RST) {
2091 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2092 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2093 switch (tp->t_state) {
2094
2095 case TCPS_SYN_RECEIVED:
2096 so->so_error = ECONNREFUSED;
2097 goto close;
2098
2099 case TCPS_ESTABLISHED:
2100 if (V_tcp_insecure_rst == 0 &&
2101 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
2102 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
2103 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
2104 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
2105 TCPSTAT_INC(tcps_badrst);
2106 goto drop;
2107 }
2108 /* FALLTHROUGH */
2109 case TCPS_FIN_WAIT_1:
2110 case TCPS_FIN_WAIT_2:
2111 case TCPS_CLOSE_WAIT:
2112 so->so_error = ECONNRESET;
2113 close:
2114 KASSERT(ti_locked == TI_WLOCKED,
2115 ("tcp_do_segment: TH_RST 1 ti_locked %d",
2116 ti_locked));
2117 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2118
2119 tp->t_state = TCPS_CLOSED;
2137 tcp_state_change(tp, TCPS_CLOSED);
2120 TCPSTAT_INC(tcps_drops);
2121 tp = tcp_close(tp);
2122 break;
2123
2124 case TCPS_CLOSING:
2125 case TCPS_LAST_ACK:
2126 KASSERT(ti_locked == TI_WLOCKED,
2127 ("tcp_do_segment: TH_RST 2 ti_locked %d",
2128 ti_locked));
2129 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2130
2131 tp = tcp_close(tp);
2132 break;
2133 }
2134 }
2135 goto drop;
2136 }
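/*
 * Editor's sketch, not part of tcp_input.c: the two RST acceptance
 * tests applied above.  The SEQ_* macros mirror the serial-number
 * comparisons in netinet/tcp_seq.h; the function names are
 * illustrative.
 */
#include <stdint.h>

#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)
#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)

/* Permissive test: anywhere within the receive window. */
static int
rst_in_window(uint32_t seq, uint32_t last_ack_sent, uint32_t rcv_wnd)
{
	return (SEQ_GEQ(seq, last_ack_sent - 1) &&
	    SEQ_LEQ(seq, last_ack_sent + rcv_wnd));
}

/*
 * Strict test used in ESTABLISHED (unless the insecure_rst knob is
 * set): the RST must land on rcv_nxt or last_ack_sent, +/- 1.
 */
static int
rst_strict(uint32_t seq, uint32_t rcv_nxt, uint32_t last_ack_sent)
{
	return ((SEQ_GEQ(seq, rcv_nxt - 1) &&
	    SEQ_LEQ(seq, rcv_nxt + 1)) ||
	    (SEQ_GEQ(seq, last_ack_sent - 1) &&
	    SEQ_LEQ(seq, last_ack_sent + 1)));
}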
2137
2138 /*
2139 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2140 * and it's less than ts_recent, drop it.
2141 */
2142 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2143 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2144
2145 /* Check to see if ts_recent is over 24 days old. */
2146 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
2147 /*
2148 * Invalidate ts_recent. If this segment updates
2149 * ts_recent, the age will be reset later and ts_recent
2150 * will get a valid value. If it does not, setting
2151 * ts_recent to zero will at least satisfy the
2152 * requirement that zero be placed in the timestamp
2153 * echo reply when ts_recent isn't valid. The
2154 * age isn't reset until we get a valid ts_recent
2155 * because we don't want out-of-order segments to be
2156 * dropped when ts_recent is old.
2157 */
2158 tp->ts_recent = 0;
2159 } else {
2160 TCPSTAT_INC(tcps_rcvduppack);
2161 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
2162 TCPSTAT_INC(tcps_pawsdrop);
2163 if (tlen)
2164 goto dropafterack;
2165 goto drop;
2166 }
2167 }
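/*
 * Editor's sketch, not part of tcp_input.c: the PAWS decision above.
 * TSTMP_LT mirrors tcp_seq.h; PAWS_IDLE_MS stands in for
 * TCP_PAWS_IDLE (roughly 24 days), past which ts_recent is
 * invalidated rather than used to drop segments.
 */
#include <stdint.h>

#define TSTMP_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define PAWS_IDLE_MS	(24U * 24 * 60 * 60 * 1000)

/* Returns 1 when the segment should be dropped as a PAWS duplicate. */
static int
paws_reject(uint32_t tsval, uint32_t ts_recent, uint32_t recent_age,
    uint32_t now_ms)
{
	if (ts_recent == 0 || !TSTMP_LT(tsval, ts_recent))
		return (0);		/* timestamp acceptable */
	if (now_ms - recent_age > PAWS_IDLE_MS)
		return (0);		/* ts_recent too stale to trust */
	return (1);
}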
2168
2169 /*
2170 * In the SYN-RECEIVED state, validate that the packet belongs to
2171 * this connection before trimming the data to fit the receive
2172 * window. Check the sequence number versus IRS since we know
2173 * the sequence numbers haven't wrapped. This is a partial fix
2174 * for the "LAND" DoS attack.
2175 */
2176 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2177 rstreason = BANDLIM_RST_OPENPORT;
2178 goto dropwithreset;
2179 }
2180
2181 todrop = tp->rcv_nxt - th->th_seq;
2182 if (todrop > 0) {
2183 /*
2184 * If this is a duplicate SYN for our current connection,
2185	 * advance over it and pretend it's not a SYN.
2186 */
2187 if (thflags & TH_SYN && th->th_seq == tp->irs) {
2188 thflags &= ~TH_SYN;
2189 th->th_seq++;
2190 if (th->th_urp > 1)
2191 th->th_urp--;
2192 else
2193 thflags &= ~TH_URG;
2194 todrop--;
2195 }
2196 /*
2197 * Following if statement from Stevens, vol. 2, p. 960.
2198 */
2199 if (todrop > tlen
2200 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2201 /*
2202 * Any valid FIN must be to the left of the window.
2203 * At this point the FIN must be a duplicate or out
2204 * of sequence; drop it.
2205 */
2206 thflags &= ~TH_FIN;
2207
2208 /*
2209 * Send an ACK to resynchronize and drop any data.
2210 * But keep on processing for RST or ACK.
2211 */
2212 tp->t_flags |= TF_ACKNOW;
2213 todrop = tlen;
2214 TCPSTAT_INC(tcps_rcvduppack);
2215 TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
2216 } else {
2217 TCPSTAT_INC(tcps_rcvpartduppack);
2218 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
2219 }
2220 drop_hdrlen += todrop; /* drop from the top afterwards */
2221 th->th_seq += todrop;
2222 tlen -= todrop;
2223 if (th->th_urp > todrop)
2224 th->th_urp -= todrop;
2225 else {
2226 thflags &= ~TH_URG;
2227 th->th_urp = 0;
2228 }
2229 }
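/*
 * Editor's sketch, not part of tcp_input.c: the Stevens vol. 2 rule
 * above, deciding how much of a segment that overlaps already-received
 * data to discard.  A bare duplicate FIN does not count as new data.
 * The function name is illustrative.
 */
static int
trim_leading(int todrop, int tlen, int has_fin, int *whole_dup)
{
	*whole_dup = (todrop > tlen ||
	    (todrop == tlen && !has_fin));
	/* Drop everything for a pure duplicate, else just the front. */
	return (*whole_dup ? tlen : todrop);
}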
2230
2231 /*
2232 * If new data are received on a connection after the
2233 * user processes are gone, then RST the other end.
2234 */
2235 if ((so->so_state & SS_NOFDREF) &&
2236 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
2237 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && "
2238 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
2239 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2240
2241 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
2242 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data "
2243 "after socket was closed, "
2244 "sending RST and removing tcpcb\n",
2245 s, __func__, tcpstates[tp->t_state], tlen);
2246 free(s, M_TCPLOG);
2247 }
2248 tp = tcp_close(tp);
2249 TCPSTAT_INC(tcps_rcvafterclose);
2250 rstreason = BANDLIM_UNLIMITED;
2251 goto dropwithreset;
2252 }
2253
2254 /*
2255 * If segment ends after window, drop trailing data
2256 * (and PUSH and FIN); if nothing left, just ACK.
2257 */
2258 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
2259 if (todrop > 0) {
2260 TCPSTAT_INC(tcps_rcvpackafterwin);
2261 if (todrop >= tlen) {
2262 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
2263 /*
2264	 * If the window is closed, we can only take segments at the
2265	 * window edge, and have to drop data and PUSH from
2266 * incoming segments. Continue processing, but
2267 * remember to ack. Otherwise, drop segment
2268 * and ack.
2269 */
2270 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
2271 tp->t_flags |= TF_ACKNOW;
2272 TCPSTAT_INC(tcps_rcvwinprobe);
2273 } else
2274 goto dropafterack;
2275 } else
2276 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
2277 m_adj(m, -todrop);
2278 tlen -= todrop;
2279 thflags &= ~(TH_PUSH|TH_FIN);
2280 }
2281
2282 /*
2283 * If last ACK falls within this segment's sequence numbers,
2284 * record its timestamp.
2285 * NOTE:
2286 * 1) That the test incorporates suggestions from the latest
2287 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2288 * 2) That updating only on newer timestamps interferes with
2289 * our earlier PAWS tests, so this check should be solely
2290 * predicated on the sequence space of this segment.
2291 * 3) That we modify the segment boundary check to be
2292 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
2293 * instead of RFC1323's
2294 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
2295 * This modified check allows us to overcome RFC1323's
2296 * limitations as described in Stevens TCP/IP Illustrated
2297 * Vol. 2 p.869. In such cases, we can still calculate the
2298 * RTT correctly when RCV.NXT == Last.ACK.Sent.
2299 */
2300 if ((to.to_flags & TOF_TS) != 0 &&
2301 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
2302 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
2303 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
2304 tp->ts_recent_age = tcp_ts_getticks();
2305 tp->ts_recent = to.to_tsval;
2306 }
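/*
 * Editor's sketch, not part of tcp_input.c: the modified boundary
 * test above.  SYN and FIN each occupy one sequence number, and the
 * "<=" (vs. RFC 1323's "<") still permits an RTT measurement when
 * RCV.NXT == Last.ACK.Sent.  The function name is illustrative.
 */
#include <stdint.h>

#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)

static int
should_record_tsval(uint32_t seq, uint32_t tlen, int syn_or_fin,
    uint32_t last_ack_sent)
{
	return (SEQ_LEQ(seq, last_ack_sent) &&
	    SEQ_LEQ(last_ack_sent, seq + tlen + (syn_or_fin ? 1 : 0)));
}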
2307
2308 /*
2309 * If a SYN is in the window, then this is an
2310 * error and we send an RST and drop the connection.
2311 */
2312 if (thflags & TH_SYN) {
2313 KASSERT(ti_locked == TI_WLOCKED,
2314 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
2315 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2316
2317 tp = tcp_drop(tp, ECONNRESET);
2318 rstreason = BANDLIM_UNLIMITED;
2319 goto drop;
2320 }
2321
2322 /*
2323 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
2324 * flag is on (half-synchronized state), then queue data for
2325 * later processing; else drop segment and return.
2326 */
2327 if ((thflags & TH_ACK) == 0) {
2328 if (tp->t_state == TCPS_SYN_RECEIVED ||
2329 (tp->t_flags & TF_NEEDSYN))
2330 goto step6;
2331 else if (tp->t_flags & TF_ACKNOW)
2332 goto dropafterack;
2333 else
2334 goto drop;
2335 }
2336
2337 /*
2338 * Ack processing.
2339 */
2340 switch (tp->t_state) {
2341
2342 /*
2343 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
2344 * ESTABLISHED state and continue processing.
2345 * The ACK was checked above.
2346 */
2347 case TCPS_SYN_RECEIVED:
2348
2349 TCPSTAT_INC(tcps_connects);
2350 soisconnected(so);
2351 /* Do window scaling? */
2352 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2353 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2354 tp->rcv_scale = tp->request_r_scale;
2355 tp->snd_wnd = tiwin;
2356 }
2357 /*
2358 * Make transitions:
2359 * SYN-RECEIVED -> ESTABLISHED
2360 * SYN-RECEIVED* -> FIN-WAIT-1
2361 */
2362 tp->t_starttime = ticks;
2363 if (tp->t_flags & TF_NEEDFIN) {
2364 tp->t_state = TCPS_FIN_WAIT_1;
2382 tcp_state_change(tp, TCPS_FIN_WAIT_1);
2365 tp->t_flags &= ~TF_NEEDFIN;
2366 } else {
2367 tp->t_state = TCPS_ESTABLISHED;
2385 tcp_state_change(tp, TCPS_ESTABLISHED);
2386 TCP_PROBE5(accept_established, NULL, tp, m->m_data, tp,
2387 th);
2368 cc_conn_init(tp);
2369 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2370 }
2371 /*
2372 * If segment contains data or ACK, will call tcp_reass()
2373 * later; if not, do so now to pass queued data to user.
2374 */
2375 if (tlen == 0 && (thflags & TH_FIN) == 0)
2376 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2377 (struct mbuf *)0);
2378 tp->snd_wl1 = th->th_seq - 1;
2379 /* FALLTHROUGH */
2380
2381 /*
2382	 * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
2383 * ACKs. If the ack is in the range
2384 * tp->snd_una < th->th_ack <= tp->snd_max
2385 * then advance tp->snd_una to th->th_ack and drop
2386 * data from the retransmission queue. If this ACK reflects
2387	 * more up-to-date window information, we update our window information.
2388 */
2389 case TCPS_ESTABLISHED:
2390 case TCPS_FIN_WAIT_1:
2391 case TCPS_FIN_WAIT_2:
2392 case TCPS_CLOSE_WAIT:
2393 case TCPS_CLOSING:
2394 case TCPS_LAST_ACK:
2395 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2396 TCPSTAT_INC(tcps_rcvacktoomuch);
2397 goto dropafterack;
2398 }
2399 if ((tp->t_flags & TF_SACK_PERMIT) &&
2400 ((to.to_flags & TOF_SACK) ||
2401 !TAILQ_EMPTY(&tp->snd_holes)))
2402 tcp_sack_doack(tp, &to, th->th_ack);
2403
2404 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2405 hhook_run_tcp_est_in(tp, th, &to);
2406
2407 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2408 if (tlen == 0 && tiwin == tp->snd_wnd) {
2409 TCPSTAT_INC(tcps_rcvdupack);
2410 /*
2411 * If we have outstanding data (other than
2412 * a window probe), this is a completely
2413	 * duplicate ack (i.e., window info didn't
2414 * change), the ack is the biggest we've
2415 * seen and we've seen exactly our rexmt
2416	 * threshold of them, assume a packet
2417 * has been dropped and retransmit it.
2418 * Kludge snd_nxt & the congestion
2419 * window so we send only this one
2420 * packet.
2421 *
2422 * We know we're losing at the current
2423 * window size so do congestion avoidance
2424 * (set ssthresh to half the current window
2425 * and pull our congestion window back to
2426 * the new ssthresh).
2427 *
2428 * Dup acks mean that packets have left the
2429 * network (they're now cached at the receiver)
2430 * so bump cwnd by the amount in the receiver
2431 * to keep a constant cwnd packets in the
2432 * network.
2433 *
2434 * When using TCP ECN, notify the peer that
2435 * we reduced the cwnd.
2436 */
2437 if (!tcp_timer_active(tp, TT_REXMT) ||
2438 th->th_ack != tp->snd_una)
2439 tp->t_dupacks = 0;
2440 else if (++tp->t_dupacks > tcprexmtthresh ||
2441 IN_FASTRECOVERY(tp->t_flags)) {
2442 cc_ack_received(tp, th, CC_DUPACK);
2443 if ((tp->t_flags & TF_SACK_PERMIT) &&
2444 IN_FASTRECOVERY(tp->t_flags)) {
2445 int awnd;
2446
2447 /*
2448 * Compute the amount of data in flight first.
2449 * We can inject new data into the pipe iff
2450 * we have less than 1/2 the original window's
2451 * worth of data in flight.
2452 */
2453 awnd = (tp->snd_nxt - tp->snd_fack) +
2454 tp->sackhint.sack_bytes_rexmit;
2455 if (awnd < tp->snd_ssthresh) {
2456 tp->snd_cwnd += tp->t_maxseg;
2457 if (tp->snd_cwnd > tp->snd_ssthresh)
2458 tp->snd_cwnd = tp->snd_ssthresh;
2459 }
2460 } else
2461 tp->snd_cwnd += tp->t_maxseg;
2462 if ((thflags & TH_FIN) &&
2463 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2464 /*
2465	 * If it's a FIN we need to process
2466 * it to avoid a race where both
2467 * sides enter FIN-WAIT and send FIN|ACK
2468 * at the same time.
2469 */
2470 break;
2471 }
2472 (void) tcp_output(tp);
2473 goto drop;
2474 } else if (tp->t_dupacks == tcprexmtthresh) {
2475 tcp_seq onxt = tp->snd_nxt;
2476
2477 /*
2478 * If we're doing sack, check to
2479 * see if we're already in sack
2480 * recovery. If we're not doing sack,
2481 * check to see if we're in newreno
2482 * recovery.
2483 */
2484 if (tp->t_flags & TF_SACK_PERMIT) {
2485 if (IN_FASTRECOVERY(tp->t_flags)) {
2486 tp->t_dupacks = 0;
2487 break;
2488 }
2489 } else {
2490 if (SEQ_LEQ(th->th_ack,
2491 tp->snd_recover)) {
2492 tp->t_dupacks = 0;
2493 break;
2494 }
2495 }
2496 /* Congestion signal before ack. */
2497 cc_cong_signal(tp, th, CC_NDUPACK);
2498 cc_ack_received(tp, th, CC_DUPACK);
2499 tcp_timer_activate(tp, TT_REXMT, 0);
2500 tp->t_rtttime = 0;
2501 if (tp->t_flags & TF_SACK_PERMIT) {
2502 TCPSTAT_INC(
2503 tcps_sack_recovery_episode);
2504 tp->sack_newdata = tp->snd_nxt;
2505 tp->snd_cwnd = tp->t_maxseg;
2506 (void) tcp_output(tp);
2507 goto drop;
2508 }
2509 tp->snd_nxt = th->th_ack;
2510 tp->snd_cwnd = tp->t_maxseg;
2511 if ((thflags & TH_FIN) &&
2512 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2513 /*
2514	 * If it's a FIN we need to process
2515 * it to avoid a race where both
2516 * sides enter FIN-WAIT and send FIN|ACK
2517 * at the same time.
2518 */
2519 break;
2520 }
2521 (void) tcp_output(tp);
2522 KASSERT(tp->snd_limited <= 2,
2523 ("%s: tp->snd_limited too big",
2524 __func__));
2525 tp->snd_cwnd = tp->snd_ssthresh +
2526 tp->t_maxseg *
2527 (tp->t_dupacks - tp->snd_limited);
2528 if (SEQ_GT(onxt, tp->snd_nxt))
2529 tp->snd_nxt = onxt;
2530 goto drop;
2531 } else if (V_tcp_do_rfc3042) {
2532 cc_ack_received(tp, th, CC_DUPACK);
2533 u_long oldcwnd = tp->snd_cwnd;
2534 tcp_seq oldsndmax = tp->snd_max;
2535 u_int sent;
2536 int avail;
2537
2538 KASSERT(tp->t_dupacks == 1 ||
2539 tp->t_dupacks == 2,
2540 ("%s: dupacks not 1 or 2",
2541 __func__));
2542 if (tp->t_dupacks == 1)
2543 tp->snd_limited = 0;
2544 tp->snd_cwnd =
2545 (tp->snd_nxt - tp->snd_una) +
2546 (tp->t_dupacks - tp->snd_limited) *
2547 tp->t_maxseg;
2548 if ((thflags & TH_FIN) &&
2549 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2550 /*
2551	 * If it's a FIN we need to process
2552 * it to avoid a race where both
2553 * sides enter FIN-WAIT and send FIN|ACK
2554 * at the same time.
2555 */
2556 break;
2557 }
2558 /*
2559 * Only call tcp_output when there
2560 * is new data available to be sent.
2561 * Otherwise we would send pure ACKs.
2562 */
2563 SOCKBUF_LOCK(&so->so_snd);
2564 avail = so->so_snd.sb_cc -
2565 (tp->snd_nxt - tp->snd_una);
2566 SOCKBUF_UNLOCK(&so->so_snd);
2567 if (avail > 0)
2568 (void) tcp_output(tp);
2569 sent = tp->snd_max - oldsndmax;
2570 if (sent > tp->t_maxseg) {
2571 KASSERT((tp->t_dupacks == 2 &&
2572 tp->snd_limited == 0) ||
2573 (sent == tp->t_maxseg + 1 &&
2574 tp->t_flags & TF_SENTFIN),
2575 ("%s: sent too much",
2576 __func__));
2577 tp->snd_limited = 2;
2578 } else if (sent > 0)
2579 ++tp->snd_limited;
2580 tp->snd_cwnd = oldcwnd;
2581 goto drop;
2582 }
2583 } else
2584 tp->t_dupacks = 0;
2585 break;
2586 }
2587
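/*
 * Editor's sketch, not part of tcp_input.c: the duplicate-ACK ladder
 * implemented above.  "thresh" is tcprexmtthresh (3 by default);
 * below it RFC 3042 limited transmit may send new data, at it fast
 * retransmit fires, and past it each dup ACK inflates cwnd by one
 * MSS.  The function name is illustrative.
 */
static const char *
dupack_action(int dupacks, int thresh)
{
	if (dupacks < thresh)
		return ("limited transmit: up to one new segment");
	if (dupacks == thresh)
		return ("fast retransmit: cwnd = 1 MSS, enter recovery");
	return ("in recovery: inflate cwnd by 1 MSS per dup ACK");
}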
2588 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2589 ("%s: th_ack <= snd_una", __func__));
2590
2591 /*
2592 * If the congestion window was inflated to account
2593 * for the other side's cached packets, retract it.
2594 */
2595 if (IN_FASTRECOVERY(tp->t_flags)) {
2596 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2597 if (tp->t_flags & TF_SACK_PERMIT)
2598 tcp_sack_partialack(tp, th);
2599 else
2600 tcp_newreno_partial_ack(tp, th);
2601 } else
2602 cc_post_recovery(tp, th);
2603 }
2604 tp->t_dupacks = 0;
2605 /*
2606 * If we reach this point, ACK is not a duplicate,
2607 * i.e., it ACKs something we sent.
2608 */
2609 if (tp->t_flags & TF_NEEDSYN) {
2610 /*
2611 * T/TCP: Connection was half-synchronized, and our
2612 * SYN has been ACK'd (so connection is now fully
2613 * synchronized). Go to non-starred state,
2614 * increment snd_una for ACK of SYN, and check if
2615 * we can do window scaling.
2616 */
2617 tp->t_flags &= ~TF_NEEDSYN;
2618 tp->snd_una++;
2619 /* Do window scaling? */
2620 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2621 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2622 tp->rcv_scale = tp->request_r_scale;
2623 /* Send window already scaled. */
2624 }
2625 }
2626
2627process_ACK:
2628 INP_WLOCK_ASSERT(tp->t_inpcb);
2629
2630 acked = BYTES_THIS_ACK(tp, th);
2631 TCPSTAT_INC(tcps_rcvackpack);
2632 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2633
2634 /*
2635 * If we just performed our first retransmit, and the ACK
2636 * arrives within our recovery window, then it was a mistake
2637 * to do the retransmit in the first place. Recover our
2638 * original cwnd and ssthresh, and proceed to transmit where
2639 * we left off.
2640 */
2641 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2642 (int)(ticks - tp->t_badrxtwin) < 0)
2643 cc_cong_signal(tp, th, CC_RTO_ERR);
2644
2645 /*
2646 * If we have a timestamp reply, update smoothed
2647 * round trip time. If no timestamp is present but
2648 * transmit timer is running and timed sequence
2649 * number was acked, update smoothed round trip time.
2650 * Since we now have an rtt measurement, cancel the
2651 * timer backoff (cf., Phil Karn's retransmit alg.).
2652 * Recompute the initial retransmit timer.
2653 *
2654 * Some boxes send broken timestamp replies
2655	 * during the SYN+ACK phase; ignore
2656	 * timestamps of 0, or we could calculate a
2657 * huge RTT and blow up the retransmit timer.
2658 */
2659 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2660 u_int t;
2661
2662 t = tcp_ts_getticks() - to.to_tsecr;
2663 if (!tp->t_rttlow || tp->t_rttlow > t)
2664 tp->t_rttlow = t;
2665 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2666 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2667 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2668 tp->t_rttlow = ticks - tp->t_rtttime;
2669 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2670 }
2671
2672 /*
2673 * If all outstanding data is acked, stop retransmit
2674 * timer and remember to restart (more output or persist).
2675 * If there is more data to be acked, restart retransmit
2676 * timer, using current (possibly backed-off) value.
2677 */
2678 if (th->th_ack == tp->snd_max) {
2679 tcp_timer_activate(tp, TT_REXMT, 0);
2680 needoutput = 1;
2681 } else if (!tcp_timer_active(tp, TT_PERSIST))
2682 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2683
2684 /*
2685 * If no data (only SYN) was ACK'd,
2686 * skip rest of ACK processing.
2687 */
2688 if (acked == 0)
2689 goto step6;
2690
2691 /*
2692 * Let the congestion control algorithm update congestion
2693 * control related information. This typically means increasing
2694 * the congestion window.
2695 */
2696 cc_ack_received(tp, th, CC_ACK);
2697
2698 SOCKBUF_LOCK(&so->so_snd);
2699 if (acked > so->so_snd.sb_cc) {
2700 tp->snd_wnd -= so->so_snd.sb_cc;
2701 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2702 ourfinisacked = 1;
2703 } else {
2704 sbdrop_locked(&so->so_snd, acked);
2705 tp->snd_wnd -= acked;
2706 ourfinisacked = 0;
2707 }
2708 /* NB: sowwakeup_locked() does an implicit unlock. */
2709 sowwakeup_locked(so);
2710 /* Detect una wraparound. */
2711 if (!IN_RECOVERY(tp->t_flags) &&
2712 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2713 SEQ_LEQ(th->th_ack, tp->snd_recover))
2714 tp->snd_recover = th->th_ack - 1;
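/*
 * Editor's sketch, not part of tcp_input.c: the SEQ_* comparisons
 * behind the wraparound test above are serial-number arithmetic:
 * subtract in 32 bits and inspect the sign, so ordering survives a
 * wrap past 2^32.  The macro mirrors netinet/tcp_seq.h.
 */
#include <stdint.h>

#define SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)

static int
seq_wrap_demo(void)
{
	/* 0x10 is "after" 0xfffffff0 across the wrap: returns 1. */
	return (SEQ_GT(0x00000010U, 0xfffffff0U));
}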
2715 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2716 if (IN_RECOVERY(tp->t_flags) &&
2717 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2718 EXIT_RECOVERY(tp->t_flags);
2719 }
2720 tp->snd_una = th->th_ack;
2721 if (tp->t_flags & TF_SACK_PERMIT) {
2722 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2723 tp->snd_recover = tp->snd_una;
2724 }
2725 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2726 tp->snd_nxt = tp->snd_una;
2727
2728 switch (tp->t_state) {
2729
2730 /*
2731 * In FIN_WAIT_1 STATE in addition to the processing
2732 * for the ESTABLISHED state if our FIN is now acknowledged
2733 * then enter FIN_WAIT_2.
2734 */
2735 case TCPS_FIN_WAIT_1:
2736 if (ourfinisacked) {
2737 /*
2738 * If we can't receive any more
2739 * data, then closing user can proceed.
2740 * Starting the timer is contrary to the
2741 * specification, but if we don't get a FIN
2742 * we'll hang forever.
2743 *
2744 * XXXjl:
2745 * we should release the tp also, and use a
2746 * compressed state.
2747 */
2748 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2749 soisdisconnected(so);
2750 tcp_timer_activate(tp, TT_2MSL,
2751 (tcp_fast_finwait2_recycle ?
2752 tcp_finwait2_timeout :
2753 TP_MAXIDLE(tp)));
2754 }
2388 cc_conn_init(tp);
2389 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));
2390 }
2391 /*
2392 * If segment contains data or ACK, will call tcp_reass()
2393 * later; if not, do so now to pass queued data to user.
2394 */
2395 if (tlen == 0 && (thflags & TH_FIN) == 0)
2396 (void) tcp_reass(tp, (struct tcphdr *)0, 0,
2397 (struct mbuf *)0);
2398 tp->snd_wl1 = th->th_seq - 1;
2399 /* FALLTHROUGH */
2400
2401 /*
2402 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
2403 * ACKs. If the ack is in the range
2404 * tp->snd_una < th->th_ack <= tp->snd_max
2405 * then advance tp->snd_una to th->th_ack and drop
2406 * data from the retransmission queue. If this ACK reflects
2407 * more up to date window information we update our window information.
2408 */
2409 case TCPS_ESTABLISHED:
2410 case TCPS_FIN_WAIT_1:
2411 case TCPS_FIN_WAIT_2:
2412 case TCPS_CLOSE_WAIT:
2413 case TCPS_CLOSING:
2414 case TCPS_LAST_ACK:
2415 if (SEQ_GT(th->th_ack, tp->snd_max)) {
2416 TCPSTAT_INC(tcps_rcvacktoomuch);
2417 goto dropafterack;
2418 }
2419 if ((tp->t_flags & TF_SACK_PERMIT) &&
2420 ((to.to_flags & TOF_SACK) ||
2421 !TAILQ_EMPTY(&tp->snd_holes)))
2422 tcp_sack_doack(tp, &to, th->th_ack);
2423
2424 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
2425 hhook_run_tcp_est_in(tp, th, &to);
2426
2427 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
2428 if (tlen == 0 && tiwin == tp->snd_wnd) {
2429 TCPSTAT_INC(tcps_rcvdupack);
2430 /*
2431 * If we have outstanding data (other than
2432 * a window probe), this is a completely
2433 * duplicate ack (ie, window info didn't
2434 * change), the ack is the biggest we've
2435 * seen and we've seen exactly our rexmt
2436 * threshhold of them, assume a packet
2437 * has been dropped and retransmit it.
2438 * Kludge snd_nxt & the congestion
2439 * window so we send only this one
2440 * packet.
2441 *
2442 * We know we're losing at the current
2443 * window size so do congestion avoidance
2444 * (set ssthresh to half the current window
2445 * and pull our congestion window back to
2446 * the new ssthresh).
2447 *
2448 * Dup acks mean that packets have left the
2449 * network (they're now cached at the receiver)
2450 * so bump cwnd by the amount in the receiver
2451 * to keep a constant cwnd packets in the
2452 * network.
2453 *
2454 * When using TCP ECN, notify the peer that
2455 * we reduced the cwnd.
2456 */
2457 if (!tcp_timer_active(tp, TT_REXMT) ||
2458 th->th_ack != tp->snd_una)
2459 tp->t_dupacks = 0;
2460 else if (++tp->t_dupacks > tcprexmtthresh ||
2461 IN_FASTRECOVERY(tp->t_flags)) {
2462 cc_ack_received(tp, th, CC_DUPACK);
2463 if ((tp->t_flags & TF_SACK_PERMIT) &&
2464 IN_FASTRECOVERY(tp->t_flags)) {
2465 int awnd;
2466
2467 /*
2468 * Compute the amount of data in flight first.
2469 * We can inject new data into the pipe iff
2470 * we have less than 1/2 the original window's
2471 * worth of data in flight.
2472 */
2473 awnd = (tp->snd_nxt - tp->snd_fack) +
2474 tp->sackhint.sack_bytes_rexmit;
2475 if (awnd < tp->snd_ssthresh) {
2476 tp->snd_cwnd += tp->t_maxseg;
2477 if (tp->snd_cwnd > tp->snd_ssthresh)
2478 tp->snd_cwnd = tp->snd_ssthresh;
2479 }
2480 } else
2481 tp->snd_cwnd += tp->t_maxseg;
2482 if ((thflags & TH_FIN) &&
2483 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2484 /*
2485 * If its a fin we need to process
2486 * it to avoid a race where both
2487 * sides enter FIN-WAIT and send FIN|ACK
2488 * at the same time.
2489 */
2490 break;
2491 }
2492 (void) tcp_output(tp);
2493 goto drop;
2494 } else if (tp->t_dupacks == tcprexmtthresh) {
2495 tcp_seq onxt = tp->snd_nxt;
2496
2497 /*
2498 * If we're doing sack, check to
2499 * see if we're already in sack
2500 * recovery. If we're not doing sack,
2501 * check to see if we're in newreno
2502 * recovery.
2503 */
2504 if (tp->t_flags & TF_SACK_PERMIT) {
2505 if (IN_FASTRECOVERY(tp->t_flags)) {
2506 tp->t_dupacks = 0;
2507 break;
2508 }
2509 } else {
2510 if (SEQ_LEQ(th->th_ack,
2511 tp->snd_recover)) {
2512 tp->t_dupacks = 0;
2513 break;
2514 }
2515 }
2516 /* Congestion signal before ack. */
2517 cc_cong_signal(tp, th, CC_NDUPACK);
2518 cc_ack_received(tp, th, CC_DUPACK);
2519 tcp_timer_activate(tp, TT_REXMT, 0);
2520 tp->t_rtttime = 0;
2521 if (tp->t_flags & TF_SACK_PERMIT) {
2522 TCPSTAT_INC(
2523 tcps_sack_recovery_episode);
2524 tp->sack_newdata = tp->snd_nxt;
2525 tp->snd_cwnd = tp->t_maxseg;
2526 (void) tcp_output(tp);
2527 goto drop;
2528 }
2529 tp->snd_nxt = th->th_ack;
2530 tp->snd_cwnd = tp->t_maxseg;
2531 if ((thflags & TH_FIN) &&
2532 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2533 /*
2534 * If its a fin we need to process
2535 * it to avoid a race where both
2536 * sides enter FIN-WAIT and send FIN|ACK
2537 * at the same time.
2538 */
2539 break;
2540 }
2541 (void) tcp_output(tp);
2542 KASSERT(tp->snd_limited <= 2,
2543 ("%s: tp->snd_limited too big",
2544 __func__));
2545 tp->snd_cwnd = tp->snd_ssthresh +
2546 tp->t_maxseg *
2547 (tp->t_dupacks - tp->snd_limited);
2548 if (SEQ_GT(onxt, tp->snd_nxt))
2549 tp->snd_nxt = onxt;
2550 goto drop;
2551 } else if (V_tcp_do_rfc3042) {
2552 cc_ack_received(tp, th, CC_DUPACK);
2553 u_long oldcwnd = tp->snd_cwnd;
2554 tcp_seq oldsndmax = tp->snd_max;
2555 u_int sent;
2556 int avail;
2557
2558 KASSERT(tp->t_dupacks == 1 ||
2559 tp->t_dupacks == 2,
2560 ("%s: dupacks not 1 or 2",
2561 __func__));
2562 if (tp->t_dupacks == 1)
2563 tp->snd_limited = 0;
2564 tp->snd_cwnd =
2565 (tp->snd_nxt - tp->snd_una) +
2566 (tp->t_dupacks - tp->snd_limited) *
2567 tp->t_maxseg;
2568 if ((thflags & TH_FIN) &&
2569 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
2570 /*
2571 * If its a fin we need to process
2572 * it to avoid a race where both
2573 * sides enter FIN-WAIT and send FIN|ACK
2574 * at the same time.
2575 */
2576 break;
2577 }
2578 /*
2579 * Only call tcp_output when there
2580 * is new data available to be sent.
2581 * Otherwise we would send pure ACKs.
2582 */
2583 SOCKBUF_LOCK(&so->so_snd);
2584 avail = so->so_snd.sb_cc -
2585 (tp->snd_nxt - tp->snd_una);
2586 SOCKBUF_UNLOCK(&so->so_snd);
2587 if (avail > 0)
2588 (void) tcp_output(tp);
2589 sent = tp->snd_max - oldsndmax;
2590 if (sent > tp->t_maxseg) {
2591 KASSERT((tp->t_dupacks == 2 &&
2592 tp->snd_limited == 0) ||
2593 (sent == tp->t_maxseg + 1 &&
2594 tp->t_flags & TF_SENTFIN),
2595 ("%s: sent too much",
2596 __func__));
2597 tp->snd_limited = 2;
2598 } else if (sent > 0)
2599 ++tp->snd_limited;
2600 tp->snd_cwnd = oldcwnd;
2601 goto drop;
2602 }
2603 } else
2604 tp->t_dupacks = 0;
2605 break;
2606 }
2607
2608 KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
2609 ("%s: th_ack <= snd_una", __func__));
2610
2611 /*
2612 * If the congestion window was inflated to account
2613 * for the other side's cached packets, retract it.
2614 */
2615 if (IN_FASTRECOVERY(tp->t_flags)) {
2616 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
2617 if (tp->t_flags & TF_SACK_PERMIT)
2618 tcp_sack_partialack(tp, th);
2619 else
2620 tcp_newreno_partial_ack(tp, th);
2621 } else
2622 cc_post_recovery(tp, th);
2623 }
2624 tp->t_dupacks = 0;
2625 /*
2626 * If we reach this point, ACK is not a duplicate,
2627 * i.e., it ACKs something we sent.
2628 */
2629 if (tp->t_flags & TF_NEEDSYN) {
2630 /*
2631 * T/TCP: Connection was half-synchronized, and our
2632 * SYN has been ACK'd (so connection is now fully
2633 * synchronized). Go to non-starred state,
2634 * increment snd_una for ACK of SYN, and check if
2635 * we can do window scaling.
2636 */
2637 tp->t_flags &= ~TF_NEEDSYN;
2638 tp->snd_una++;
2639 /* Do window scaling? */
2640 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2641 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2642 tp->rcv_scale = tp->request_r_scale;
2643 /* Send window already scaled. */
2644 }
2645 }
2646
2647 process_ACK:
2648 INP_WLOCK_ASSERT(tp->t_inpcb);
2649
2650 acked = BYTES_THIS_ACK(tp, th);
2651 TCPSTAT_INC(tcps_rcvackpack);
2652 TCPSTAT_ADD(tcps_rcvackbyte, acked);
2653
2654 /*
2655 * If we just performed our first retransmit, and the ACK
2656 * arrives within our recovery window, then it was a mistake
2657 * to do the retransmit in the first place. Recover our
2658 * original cwnd and ssthresh, and proceed to transmit where
2659 * we left off.
2660 */
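/*
 * TF_PREVVALID marks the pre-retransmit cwnd/ssthresh
 * snapshot as usable; CC_RTO_ERR asks the congestion
 * control code to restore it.
 */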
2661 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID &&
2662 (int)(ticks - tp->t_badrxtwin) < 0)
2663 cc_cong_signal(tp, th, CC_RTO_ERR);
2664
2665 /*
2666 * If we have a timestamp reply, update smoothed
2667 * round trip time. If no timestamp is present but
2668 * transmit timer is running and timed sequence
2669 * number was acked, update smoothed round trip time.
2670 * Since we now have an rtt measurement, cancel the
2671 * timer backoff (cf., Phil Karn's retransmit alg.).
2672 * Recompute the initial retransmit timer.
2673 *
2674 * Some boxes send broken timestamp replies
2675 * during the SYN+ACK phase, ignore
2676 * timestamps of 0 or we could calculate a
2677 * huge RTT and blow up the retransmit timer.
2678 */
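/*
 * Note: to_tsecr echoes the tcp_ts_getticks() value we
 * stamped on the segment now being ACKed, so the
 * difference below is the RTT in timestamp ticks (the +1
 * keeps the sample nonzero). The non-timestamp path times
 * one segment per window via t_rtttime/t_rtseq.
 */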
2679 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) {
2680 u_int t;
2681
2682 t = tcp_ts_getticks() - to.to_tsecr;
2683 if (!tp->t_rttlow || tp->t_rttlow > t)
2684 tp->t_rttlow = t;
2685 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1);
2686 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2687 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2688 tp->t_rttlow = ticks - tp->t_rtttime;
2689 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2690 }
2691
2692 /*
2693 * If all outstanding data is acked, stop retransmit
2694 * timer and remember to restart (more output or persist).
2695 * If there is more data to be acked, restart retransmit
2696 * timer, using current (possibly backed-off) value.
2697 */
2698 if (th->th_ack == tp->snd_max) {
2699 tcp_timer_activate(tp, TT_REXMT, 0);
2700 needoutput = 1;
2701 } else if (!tcp_timer_active(tp, TT_PERSIST))
2702 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2703
2704 /*
2705 * If no data (only SYN) was ACK'd,
2706 * skip rest of ACK processing.
2707 */
2708 if (acked == 0)
2709 goto step6;
2710
2711 /*
2712 * Let the congestion control algorithm update congestion
2713 * control related information. This typically means increasing
2714 * the congestion window.
2715 */
2716 cc_ack_received(tp, th, CC_ACK);
2717
2718 SOCKBUF_LOCK(&so->so_snd);
2719 if (acked > so->so_snd.sb_cc) {
2720 tp->snd_wnd -= so->so_snd.sb_cc;
2721 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
2722 ourfinisacked = 1;
2723 } else {
2724 sbdrop_locked(&so->so_snd, acked);
2725 tp->snd_wnd -= acked;
2726 ourfinisacked = 0;
2727 }
2728 /* NB: sowwakeup_locked() does an implicit unlock. */
2729 sowwakeup_locked(so);
2730 /* Detect una wraparound. */
2731 if (!IN_RECOVERY(tp->t_flags) &&
2732 SEQ_GT(tp->snd_una, tp->snd_recover) &&
2733 SEQ_LEQ(th->th_ack, tp->snd_recover))
2734 tp->snd_recover = th->th_ack - 1;
2735 /* XXXLAS: Can this be moved up into cc_post_recovery? */
2736 if (IN_RECOVERY(tp->t_flags) &&
2737 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
2738 EXIT_RECOVERY(tp->t_flags);
2739 }
2740 tp->snd_una = th->th_ack;
2741 if (tp->t_flags & TF_SACK_PERMIT) {
2742 if (SEQ_GT(tp->snd_una, tp->snd_recover))
2743 tp->snd_recover = tp->snd_una;
2744 }
2745 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
2746 tp->snd_nxt = tp->snd_una;
2747
2748 switch (tp->t_state) {
2749
2750 /*
2751 * In FIN_WAIT_1 STATE in addition to the processing
2752 * for the ESTABLISHED state if our FIN is now acknowledged
2753 * then enter FIN_WAIT_2.
2754 */
2755 case TCPS_FIN_WAIT_1:
2756 if (ourfinisacked) {
2757 /*
2758 * If we can't receive any more
2759 * data, then closing user can proceed.
2760 * Starting the timer is contrary to the
2761 * specification, but if we don't get a FIN
2762 * we'll hang forever.
2763 *
2764 * XXXjl:
2765 * we should release the tp also, and use a
2766 * compressed state.
2767 */
2768 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2769 soisdisconnected(so);
2770 tcp_timer_activate(tp, TT_2MSL,
2771 (tcp_fast_finwait2_recycle ?
2772 tcp_finwait2_timeout :
2773 TP_MAXIDLE(tp)));
2774 }
2755 tp->t_state = TCPS_FIN_WAIT_2;
2775 tcp_state_change(tp, TCPS_FIN_WAIT_2);
2756 }
2757 break;
2758
2759 /*
2760 * In CLOSING STATE in addition to the processing for
2761 * the ESTABLISHED state if the ACK acknowledges our FIN
2762 * then enter the TIME-WAIT state, otherwise ignore
2763 * the segment.
2764 */
2765 case TCPS_CLOSING:
2766 if (ourfinisacked) {
2767 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2768 tcp_twstart(tp);
2769 INP_INFO_WUNLOCK(&V_tcbinfo);
2770 m_freem(m);
2771 return;
2772 }
2773 break;
2774
2775 /*
2776 * In LAST_ACK, we may still be waiting for data to drain
2777 * and/or to be acked, as well as for the ack of our FIN.
2778 * If our FIN is now acknowledged, delete the TCB,
2779 * enter the closed state and return.
2780 */
2781 case TCPS_LAST_ACK:
2782 if (ourfinisacked) {
2783 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2784 tp = tcp_close(tp);
2785 goto drop;
2786 }
2787 break;
2788 }
2789 }
2790
2791 step6:
2792 INP_WLOCK_ASSERT(tp->t_inpcb);
2793
2794 /*
2795 * Update window information.
2796 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2797 */
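/*
 * The snd_wl1/snd_wl2 test below accepts the advertised
 * window only from a "fresh" segment: a newer sequence
 * number, the same sequence with a newer ack, or the same
 * sequence and ack with a strictly larger window.
 */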
2798 if ((thflags & TH_ACK) &&
2799 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2800 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2801 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2802 /* keep track of pure window updates */
2803 if (tlen == 0 &&
2804 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2805 TCPSTAT_INC(tcps_rcvwinupd);
2806 tp->snd_wnd = tiwin;
2807 tp->snd_wl1 = th->th_seq;
2808 tp->snd_wl2 = th->th_ack;
2809 if (tp->snd_wnd > tp->max_sndwnd)
2810 tp->max_sndwnd = tp->snd_wnd;
2811 needoutput = 1;
2812 }
2813
2814 /*
2815 * Process segments with URG.
2816 */
2817 if ((thflags & TH_URG) && th->th_urp &&
2818 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2819 /*
2820 * This is a kludge, but if we receive and accept
2821 * random urgent pointers, we'll crash in
2822 * soreceive. It's hard to imagine someone
2823 * actually wanting to send this much urgent data.
2824 */
2825 SOCKBUF_LOCK(&so->so_rcv);
2826 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2827 th->th_urp = 0; /* XXX */
2828 thflags &= ~TH_URG; /* XXX */
2829 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2830 goto dodata; /* XXX */
2831 }
2832 /*
2833 * If this segment advances the known urgent pointer,
2834 * then mark the data stream. This should not happen
2835 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2836 * a FIN has been received from the remote side.
2837 * In these states we ignore the URG.
2838 *
2839 * According to RFC961 (Assigned Protocols),
2840 * the urgent pointer points to the last octet
2841 * of urgent data. We continue, however,
2842 * to consider it to indicate the first octet
2843 * of data past the urgent section as the original
2844 * spec states (in one of two places).
2845 */
2846 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2847 tp->rcv_up = th->th_seq + th->th_urp;
2848 so->so_oobmark = so->so_rcv.sb_cc +
2849 (tp->rcv_up - tp->rcv_nxt) - 1;
2850 if (so->so_oobmark == 0)
2851 so->so_rcv.sb_state |= SBS_RCVATMARK;
2852 sohasoutofband(so);
2853 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2854 }
2855 SOCKBUF_UNLOCK(&so->so_rcv);
2856 /*
2857 * Remove out of band data so it doesn't get presented to the user.
2858 * This can happen independent of advancing the URG pointer,
2859 * but if two URG's are pending at once, some out-of-band
2860 * data may creep in... ick.
2861 */
2862 if (th->th_urp <= (u_long)tlen &&
2863 !(so->so_options & SO_OOBINLINE)) {
2864 /* hdr drop is delayed */
2865 tcp_pulloutofband(so, th, m, drop_hdrlen);
2866 }
2867 } else {
2868 /*
2869 * If no out of band data is expected,
2870 * pull receive urgent pointer along
2871 * with the receive window.
2872 */
2873 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2874 tp->rcv_up = tp->rcv_nxt;
2875 }
2876 dodata: /* XXX */
2877 INP_WLOCK_ASSERT(tp->t_inpcb);
2878
2879 /*
2880 * Process the segment text, merging it into the TCP sequencing queue,
2881 * and arranging for acknowledgment of receipt if necessary.
2882 * This process logically involves adjusting tp->rcv_wnd as data
2883 * is presented to the user (this happens in tcp_usrreq.c,
2884 * case PRU_RCVD). If a FIN has already been received on this
2885 * connection then we just ignore the text.
2886 */
2887 if ((tlen || (thflags & TH_FIN)) &&
2888 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2889 tcp_seq save_start = th->th_seq;
2890 m_adj(m, drop_hdrlen); /* delayed header drop */
2891 /*
2892 * Insert segment which includes th into TCP reassembly queue
2893 * with control block tp. Set thflags to whether reassembly now
2894 * includes a segment with FIN. This handles the common case
2895 * inline (segment is the next to be received on an established
2896 * connection, and the queue is empty), avoiding linkage into
2897 * and removal from the queue and repetition of various
2898 * conversions.
2899 * Set DELACK for segments received in order, but ack
2900 * immediately when segments are out of order (so
2901 * fast retransmit can work).
2902 */
2903 if (th->th_seq == tp->rcv_nxt &&
2904 LIST_EMPTY(&tp->t_segq) &&
2905 TCPS_HAVEESTABLISHED(tp->t_state)) {
2906 if (DELAY_ACK(tp))
2907 tp->t_flags |= TF_DELACK;
2908 else
2909 tp->t_flags |= TF_ACKNOW;
2910 tp->rcv_nxt += tlen;
2911 thflags = th->th_flags & TH_FIN;
2912 TCPSTAT_INC(tcps_rcvpack);
2913 TCPSTAT_ADD(tcps_rcvbyte, tlen);
2914 ND6_HINT(tp);
2915 SOCKBUF_LOCK(&so->so_rcv);
2916 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2917 m_freem(m);
2918 else
2919 sbappendstream_locked(&so->so_rcv, m);
2920 /* NB: sorwakeup_locked() does an implicit unlock. */
2921 sorwakeup_locked(so);
2922 } else {
2923 /*
2924 * XXX: Due to the header drop above, "th" is
2925 * theoretically invalid by now. Fortunately
2926 * m_adj() doesn't actually free any mbufs
2927 * when trimming from the head.
2928 */
2929 thflags = tcp_reass(tp, th, &tlen, m);
2930 tp->t_flags |= TF_ACKNOW;
2931 }
2932 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2933 tcp_update_sack_list(tp, save_start, save_start + tlen);
2934 #if 0
2935 /*
2936 * Note the amount of data that peer has sent into
2937 * our window, in order to estimate the sender's
2938 * buffer size.
2939 * XXX: Unused.
2940 */
2941 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
2942 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2943 else
2944 len = so->so_rcv.sb_hiwat;
2945 #endif
2946 } else {
2947 m_freem(m);
2948 thflags &= ~TH_FIN;
2949 }
2950
2951 /*
2952 * If FIN is received ACK the FIN and let the user know
2953 * that the connection is closing.
2954 */
2955 if (thflags & TH_FIN) {
2956 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2957 socantrcvmore(so);
2958 /*
2959 * If connection is half-synchronized
2960 * (ie NEEDSYN flag on) then delay ACK,
2961 * so it may be piggybacked when SYN is sent.
2962 * Otherwise, since we received a FIN then no
2963 * more input can be expected, send ACK now.
2964 */
2965 if (tp->t_flags & TF_NEEDSYN)
2966 tp->t_flags |= TF_DELACK;
2967 else
2968 tp->t_flags |= TF_ACKNOW;
2969 tp->rcv_nxt++;
2970 }
2971 switch (tp->t_state) {
2972
2973 /*
2974 * In SYN_RECEIVED and ESTABLISHED STATES
2975 * enter the CLOSE_WAIT state.
2976 */
2977 case TCPS_SYN_RECEIVED:
2978 tp->t_starttime = ticks;
2979 /* FALLTHROUGH */
2980 case TCPS_ESTABLISHED:
2981 tp->t_state = TCPS_CLOSE_WAIT;
3001 tcp_state_change(tp, TCPS_CLOSE_WAIT);
2982 break;
2983
2984 /*
2985 * If still in FIN_WAIT_1 STATE FIN has not been acked so
2986 * enter the CLOSING state.
2987 */
2988 case TCPS_FIN_WAIT_1:
2989 tp->t_state = TCPS_CLOSING;
3009 tcp_state_change(tp, TCPS_CLOSING);
2990 break;
2991
2992 /*
2993 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2994 * starting the time-wait timer, turning off the other
2995 * standard timers.
2996 */
2997 case TCPS_FIN_WAIT_2:
2998 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
2999 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
3000 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
3001 ti_locked));
3002
3003 tcp_twstart(tp);
3004 INP_INFO_WUNLOCK(&V_tcbinfo);
3005 return;
3006 }
3007 }
3008 if (ti_locked == TI_WLOCKED)
3009 INP_INFO_WUNLOCK(&V_tcbinfo);
3010 ti_locked = TI_UNLOCKED;
3011
3012 #ifdef TCPDEBUG
3013 if (so->so_options & SO_DEBUG)
3014 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3015 &tcp_savetcp, 0);
3016 #endif
3017
3018 /*
3019 * Return any desired output.
3020 */
3021 if (needoutput || (tp->t_flags & TF_ACKNOW))
3022 (void) tcp_output(tp);
3023
3024 check_delack:
3025 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3026 __func__, ti_locked));
3027 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3028 INP_WLOCK_ASSERT(tp->t_inpcb);
3029
3030 if (tp->t_flags & TF_DELACK) {
3031 tp->t_flags &= ~TF_DELACK;
3032 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3033 }
3034 INP_WUNLOCK(tp->t_inpcb);
3035 return;
3036
3037 dropafterack:
3038 /*
3039 * Generate an ACK dropping incoming segment if it occupies
3040 * sequence space, where the ACK reflects our state.
3041 *
3042 * We can now skip the test for the RST flag since all
3043 * paths to this code happen after packets containing
3044 * RST have been dropped.
3045 *
3046 * In the SYN-RECEIVED state, don't send an ACK unless the
3047 * segment we received passes the SYN-RECEIVED ACK test.
3048 * If it fails send a RST. This breaks the loop in the
3049 * "LAND" DoS attack, and also prevents an ACK storm
3050 * between two listening ports that have been sent forged
3051 * SYN segments, each with the source address of the other.
3052 */
3053 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3054 (SEQ_GT(tp->snd_una, th->th_ack) ||
3055 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3056 rstreason = BANDLIM_RST_OPENPORT;
3057 goto dropwithreset;
3058 }
3059 #ifdef TCPDEBUG
3060 if (so->so_options & SO_DEBUG)
3061 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3062 &tcp_savetcp, 0);
3063 #endif
3064 if (ti_locked == TI_WLOCKED)
3065 INP_INFO_WUNLOCK(&V_tcbinfo);
3066 ti_locked = TI_UNLOCKED;
3067
3068 tp->t_flags |= TF_ACKNOW;
3069 (void) tcp_output(tp);
3070 INP_WUNLOCK(tp->t_inpcb);
3071 m_freem(m);
3072 return;
3073
3074 dropwithreset:
3075 if (ti_locked == TI_WLOCKED)
3076 INP_INFO_WUNLOCK(&V_tcbinfo);
3077 ti_locked = TI_UNLOCKED;
3078
3079 if (tp != NULL) {
3080 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3081 INP_WUNLOCK(tp->t_inpcb);
3082 } else
3083 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3084 return;
3085
3086 drop:
3087 if (ti_locked == TI_WLOCKED) {
3088 INP_INFO_WUNLOCK(&V_tcbinfo);
3089 ti_locked = TI_UNLOCKED;
3090 }
3091 #ifdef INVARIANTS
3092 else
3093 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3094 #endif
3095
3096 /*
3097 * Drop space held by incoming segment and return.
3098 */
3099 #ifdef TCPDEBUG
3100 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3101 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3102 &tcp_savetcp, 0);
3103 #endif
3104 if (tp != NULL)
3105 INP_WUNLOCK(tp->t_inpcb);
3106 m_freem(m);
3107 }
3108 
3109 /*
3110 * Issue RST and make ACK acceptable to originator of segment.
3111 * The mbuf must still include the original packet header.
3112 * tp may be NULL.
3113 */
3114 static void
3115 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3116 int tlen, int rstreason)
3117 {
3118 #ifdef INET
3119 struct ip *ip;
3120 #endif
3121 #ifdef INET6
3122 struct ip6_hdr *ip6;
3123 #endif
3124
3125 if (tp != NULL) {
3126 INP_WLOCK_ASSERT(tp->t_inpcb);
3127 }
3128
3129 /* Don't bother if destination was broadcast/multicast. */
3130 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3131 goto drop;
3132 #ifdef INET6
3133 if (mtod(m, struct ip *)->ip_v == 6) {
3134 ip6 = mtod(m, struct ip6_hdr *);
3135 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3136 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3137 goto drop;
3138 /* IPv6 anycast check is done at tcp6_input() */
3139 }
3140 #endif
3141 #if defined(INET) && defined(INET6)
3142 else
3143 #endif
3144 #ifdef INET
3145 {
3146 ip = mtod(m, struct ip *);
3147 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3148 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3149 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3150 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3151 goto drop;
3152 }
3153 #endif
3154
3155 /* Perform bandwidth limiting. */
3156 if (badport_bandlim(rstreason) < 0)
3157 goto drop;
3158
3159 /* tcp_respond consumes the mbuf chain. */
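/*
 * RFC 793 reset generation: if the offending segment
 * carried an ACK, send a bare RST with seq = their ack;
 * otherwise send RST|ACK acking seq + tlen (a SYN counts
 * as one octet of sequence space).
 */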
3160 if (th->th_flags & TH_ACK) {
3161 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3162 th->th_ack, TH_RST);
3163 } else {
3164 if (th->th_flags & TH_SYN)
3165 tlen++;
3166 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3167 (tcp_seq)0, TH_RST|TH_ACK);
3168 }
3169 return;
3170 drop:
3171 m_freem(m);
3172 }
3173 
3174 /*
3175 * Parse TCP options and place in tcpopt.
3176 */
3177 static void
3178 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3179 {
3180 int opt, optlen;
3181
3182 to->to_flags = 0;
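/*
 * Options are a TLV list: cp[0] is the kind and cp[1] the
 * total option length. EOL terminates the list; NOP is a
 * single pad byte with no length field.
 */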
3183 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3184 opt = cp[0];
3185 if (opt == TCPOPT_EOL)
3186 break;
3187 if (opt == TCPOPT_NOP)
3188 optlen = 1;
3189 else {
3190 if (cnt < 2)
3191 break;
3192 optlen = cp[1];
3193 if (optlen < 2 || optlen > cnt)
3194 break;
3195 }
3196 switch (opt) {
3197 case TCPOPT_MAXSEG:
3198 if (optlen != TCPOLEN_MAXSEG)
3199 continue;
3200 if (!(flags & TO_SYN))
3201 continue;
3202 to->to_flags |= TOF_MSS;
3203 bcopy((char *)cp + 2,
3204 (char *)&to->to_mss, sizeof(to->to_mss));
3205 to->to_mss = ntohs(to->to_mss);
3206 break;
3207 case TCPOPT_WINDOW:
3208 if (optlen != TCPOLEN_WINDOW)
3209 continue;
3210 if (!(flags & TO_SYN))
3211 continue;
3212 to->to_flags |= TOF_SCALE;
3213 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3214 break;
3215 case TCPOPT_TIMESTAMP:
3216 if (optlen != TCPOLEN_TIMESTAMP)
3217 continue;
3218 to->to_flags |= TOF_TS;
3219 bcopy((char *)cp + 2,
3220 (char *)&to->to_tsval, sizeof(to->to_tsval));
3221 to->to_tsval = ntohl(to->to_tsval);
3222 bcopy((char *)cp + 6,
3223 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3224 to->to_tsecr = ntohl(to->to_tsecr);
3225 break;
3226 #ifdef TCP_SIGNATURE
3227 /*
3228 * XXX In order to reply to a host which has set the
3229 * TCP_SIGNATURE option in its initial SYN, we have to
3230 * record the fact that the option was observed here
3231 * for the syncache code to perform the correct response.
3232 */
3233 case TCPOPT_SIGNATURE:
3234 if (optlen != TCPOLEN_SIGNATURE)
3235 continue;
3236 to->to_flags |= TOF_SIGNATURE;
3237 to->to_signature = cp + 2;
3238 break;
3239 #endif
3240 case TCPOPT_SACK_PERMITTED:
3241 if (optlen != TCPOLEN_SACK_PERMITTED)
3242 continue;
3243 if (!(flags & TO_SYN))
3244 continue;
3245 if (!V_tcp_do_sack)
3246 continue;
3247 to->to_flags |= TOF_SACKPERM;
3248 break;
3249 case TCPOPT_SACK:
3250 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3251 continue;
3252 if (flags & TO_SYN)
3253 continue;
3254 to->to_flags |= TOF_SACK;
3255 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3256 to->to_sacks = cp + 2;
3257 TCPSTAT_INC(tcps_sack_rcv_blocks);
3258 break;
3259 default:
3260 continue;
3261 }
3262 }
3263 }
3264 
3265 /*
3266 * Pull out of band byte out of a segment so
3267 * it doesn't appear in the user's data queue.
3268 * It is still reflected in the segment length for
3269 * sequencing purposes.
3270 */
3271 static void
3272 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3273 int off)
3274 {
3275 int cnt = off + th->th_urp - 1;
3276
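/*
 * cnt is the offset of the out-of-band byte within the mbuf
 * chain: TCP data starts at "off", and th_urp designates the
 * octet following the OOB byte (see the urgent-pointer
 * discussion in the input path above).
 */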
3277 while (cnt >= 0) {
3278 if (m->m_len > cnt) {
3279 char *cp = mtod(m, caddr_t) + cnt;
3280 struct tcpcb *tp = sototcpcb(so);
3281
3282 INP_WLOCK_ASSERT(tp->t_inpcb);
3283
3284 tp->t_iobc = *cp;
3285 tp->t_oobflags |= TCPOOB_HAVEDATA;
3286 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3287 m->m_len--;
3288 if (m->m_flags & M_PKTHDR)
3289 m->m_pkthdr.len--;
3290 return;
3291 }
3292 cnt -= m->m_len;
3293 m = m->m_next;
3294 if (m == NULL)
3295 break;
3296 }
3297 panic("tcp_pulloutofband");
3298 }
3299 
3300 /*
3301 * Collect new round-trip time estimate
3302 * and update averages and current timeout.
3303 */
3304 static void
3305 tcp_xmit_timer(struct tcpcb *tp, int rtt)
3306 {
3307 int delta;
3308
3309 INP_WLOCK_ASSERT(tp->t_inpcb);
3310
3311 TCPSTAT_INC(tcps_rttupdated);
3312 tp->t_rttupdated++;
3313 if (tp->t_srtt != 0) {
3314 /*
3315 * srtt is stored as fixed point with 5 bits after the
3316 * binary point (i.e., scaled by 32). The following magic
3317 * is equivalent to the smoothing algorithm in rfc793 with
3318 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3319 * point). Adjust rtt to origin 0.
3320 */
3321 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3322 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3323
3324 if ((tp->t_srtt += delta) <= 0)
3325 tp->t_srtt = 1;
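/*
 * Worked example (assuming TCP_RTT_SHIFT == 5 and
 * TCP_DELTA_SHIFT == 2, matching the scaling described
 * above): with t_srtt = 32 (1 tick) and rtt = 3,
 * delta = ((3 - 1) << 2) - (32 >> 3) = 4, so t_srtt
 * becomes 36, i.e. 1.125 ticks: the estimate moved by
 * 1/8 of the origin-adjusted difference.
 */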
3326
3327 /*
3328 * We accumulate a smoothed rtt variance (actually, a
3329 * smoothed mean difference), then set the retransmit
3330 * timer to smoothed rtt + 4 times the smoothed variance.
3331 * rttvar is stored as fixed point with 4 bits after the
3332 * binary point (scaled by 16). The following is
3333 * equivalent to rfc793 smoothing with an alpha of .75
3334 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3335 * rfc793's wired-in beta.
3336 */
3337 if (delta < 0)
3338 delta = -delta;
3339 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3340 if ((tp->t_rttvar += delta) <= 0)
3341 tp->t_rttvar = 1;
3342 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3343 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3344 } else {
3345 /*
3346 * No rtt measurement yet - use the unsmoothed rtt.
3347 * Set the variance to half the rtt (so our first
3348 * retransmit happens at 3*rtt).
3349 */
3350 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3351 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3352 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3353 }
3354 tp->t_rtttime = 0;
3355 tp->t_rxtshift = 0;
3356
3357 /*
3358 * The retransmit should happen at rtt + 4 * rttvar.
3359 * Because of the way we do the smoothing, srtt and rttvar
3360 * will each average +1/2 tick of bias. When we compute
3361 * the retransmit timer, we want 1/2 tick of rounding and
3362 * 1 extra tick because of +-1/2 tick uncertainty in the
3363 * firing of the timer. The bias will give us exactly the
3364 * 1.5 tick we need. But, because the bias is
3365 * statistical, we have to test that we don't drop below
3366 * the minimum feasible timer (which is 2 ticks).
3367 */
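/*
 * TCPT_RANGESET clamps TCP_REXMTVAL (srtt + 4 * rttvar, as
 * noted above) to the range [max(t_rttmin, rtt + 2),
 * TCPTV_REXMTMAX].
 */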
3368 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3369 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3370
3371 /*
3372 * We received an ack for a packet that wasn't retransmitted;
3373 * it is probably safe to discard any error indications we've
3374 * received recently. This isn't quite right, but close enough
3375 * for now (a route might have failed after we sent a segment,
3376 * and the return path might not be symmetrical).
3377 */
3378 tp->t_softerror = 0;
3379 }
3380 
3381 /*
3382 * Determine a reasonable value for maxseg size.
3383 * If the route is known, check route for mtu.
3384 * If none, use an mss that can be handled on the outgoing interface
3385 * without forcing IP to fragment. If no route is found, route has no mtu,
3386 * or the destination isn't local, use a default, hopefully conservative
3387 * size (usually 512 or the default IP max size, but no more than the mtu
3388 * of the interface), as we can't discover anything about intervening
3389 * gateways or networks. We also initialize the congestion/slow start
3390 * window to be a single segment if the destination isn't local.
3391 * While looking at the routing entry, we also initialize other path-dependent
3392 * parameters from pre-set or cached values in the routing entry.
3393 *
3394 * Also take into account the space needed for options that we
3395 * send regularly. Make maxseg shorter by that amount to assure
3396 * that we can send maxseg amount of data even when the options
3397 * are present. Store the upper limit of the length of options plus
3398 * data in maxopd.
3399 *
3400 * NOTE that this routine is only called when we process an incoming
3401 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3402 * settings are handled in tcp_mssopt().
3403 */
3404 void
3405 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3406 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3407 {
3408 int mss = 0;
3409 u_long maxmtu = 0;
3410 struct inpcb *inp = tp->t_inpcb;
3411 struct hc_metrics_lite metrics;
3412 int origoffer;
3413 #ifdef INET6
3414 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3415 size_t min_protoh = isipv6 ?
3416 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3417 sizeof (struct tcpiphdr);
3418 #else
3419 const size_t min_protoh = sizeof(struct tcpiphdr);
3420 #endif
3421
3422 INP_WLOCK_ASSERT(tp->t_inpcb);
3423
3424 if (mtuoffer != -1) {
3425 KASSERT(offer == -1, ("%s: conflict", __func__));
3426 offer = mtuoffer - min_protoh;
3427 }
3428 origoffer = offer;
3429
3430 /* Initialize. */
3431 #ifdef INET6
3432 if (isipv6) {
3433 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3434 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
3435 }
3436 #endif
3437 #if defined(INET) && defined(INET6)
3438 else
3439 #endif
3440 #ifdef INET
3441 {
3442 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3443 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
3444 }
3445 #endif
3446
3447 /*
3448 * No route to sender, stay with default mss and return.
3449 */
3450 if (maxmtu == 0) {
3451 /*
3452 * In case we return early we need to initialize metrics
3453 * to a defined state as tcp_hc_get() would do for us
3454 * if there was no cache hit.
3455 */
3456 if (metricptr != NULL)
3457 bzero(metricptr, sizeof(struct hc_metrics_lite));
3458 return;
3459 }
3460
3461 /* What have we got? */
3462 switch (offer) {
3463 case 0:
3464 /*
3465 * Offer == 0 means that there was no MSS on the SYN
3466 * segment, in this case we use tcp_mssdflt as
3467 * already assigned to t_maxopd above.
3468 */
3469 offer = tp->t_maxopd;
3470 break;
3471
3472 case -1:
3473 /*
3474 * Offer == -1 means that we didn't receive SYN yet.
3475 */
3476 /* FALLTHROUGH */
3477
3478 default:
3479 /*
3480 * Prevent DoS attack with too small MSS. Round up
3481 * to at least minmss.
3482 */
3483 offer = max(offer, V_tcp_minmss);
3484 }
3485
3486 /*
3487 * rmx information is now retrieved from tcp_hostcache.
3488 */
3489 tcp_hc_get(&inp->inp_inc, &metrics);
3490 if (metricptr != NULL)
3491 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3492
3493 /*
3494 * If there's a discovered MTU in the TCP hostcache, use it;
3495 * else, use the link MTU.
3496 */
3497 if (metrics.rmx_mtu)
3498 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3499 else {
3500 #ifdef INET6
3501 if (isipv6) {
3502 mss = maxmtu - min_protoh;
3503 if (!V_path_mtu_discovery &&
3504 !in6_localaddr(&inp->in6p_faddr))
3505 mss = min(mss, V_tcp_v6mssdflt);
3506 }
3507 #endif
3508 #if defined(INET) && defined(INET6)
3509 else
3510 #endif
3511 #ifdef INET
3512 {
3513 mss = maxmtu - min_protoh;
3514 if (!V_path_mtu_discovery &&
3515 !in_localaddr(inp->inp_faddr))
3516 mss = min(mss, V_tcp_mssdflt);
3517 }
3518 #endif
3519 /*
3520 * XXX - The above conditional (mss = maxmtu - min_protoh)
3521 * probably violates the TCP spec.
3522 * The problem is that, since we don't know the
3523 * other end's MSS, we are supposed to use a conservative
3524 * default. But, if we do that, then MTU discovery will
3525 * never actually take place, because the conservative
3526 * default is much less than the MTUs typically seen
3527 * on the Internet today. For the moment, we'll sweep
3528 * this under the carpet.
3529 *
3530 * The conservative default might not actually be a problem
3531 * if the only case this occurs is when sending an initial
3532 * SYN with options and data to a host we've never talked
3533 * to before. Then, they will reply with an MSS value which
3534 * will get recorded and the new parameters should get
3535 * recomputed. For Further Study.
3536 */
3537 }
3538 mss = min(mss, offer);
3539
3540 /*
3541 * Sanity check: make sure that maxopd will be large
3542 * enough to allow some data on segments even if
3543 * all the option space is used (40 bytes). Otherwise
3544 * funny things may happen in tcp_output.
3545 */
3546 mss = max(mss, 64);
3547
3548 /*
3549 * maxopd stores the maximum length of data AND options
3550 * in a segment; maxseg is the amount of data in a normal
3551 * segment. We need to store this value (maxopd) apart
3552 * from maxseg, because now every segment carries options
3553 * and thus we normally have somewhat less data in segments.
3554 */
3555 tp->t_maxopd = mss;
3556
3557 /*
3558 * origoffer==-1 indicates that no segments were received yet.
3559 * In this case we just guess.
3560 */
3561 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3562 (origoffer == -1 ||
3563 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3564 mss -= TCPOLEN_TSTAMP_APPA;
3565
3566 tp->t_maxseg = mss;
3567 }
3568 
3569 void
3570 tcp_mss(struct tcpcb *tp, int offer)
3571 {
3572 int mss;
3573 u_long bufsize;
3574 struct inpcb *inp;
3575 struct socket *so;
3576 struct hc_metrics_lite metrics;
3577 struct tcp_ifcap cap;
3578
3579 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3580
3581 bzero(&cap, sizeof(cap));
3582 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3583
3584 mss = tp->t_maxseg;
3585 inp = tp->t_inpcb;
3586
3587 /*
3588 * If there's a pipesize, change the socket buffer to that size,
3589 * don't change it if sb_hiwat differs from the default (then it
3590 * has been changed on purpose with setsockopt).
3591 * Make the socket buffers an integral number of mss units;
3592 * if the mss is larger than the socket buffer, decrease the mss.
3593 */
3594 so = inp->inp_socket;
3595 SOCKBUF_LOCK(&so->so_snd);
3596 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3597 bufsize = metrics.rmx_sendpipe;
3598 else
3599 bufsize = so->so_snd.sb_hiwat;
3600 if (bufsize < mss)
3601 mss = bufsize;
3602 else {
3603 bufsize = roundup(bufsize, mss);
3604 if (bufsize > sb_max)
3605 bufsize = sb_max;
3606 if (bufsize > so->so_snd.sb_hiwat)
3607 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3608 }
3609 SOCKBUF_UNLOCK(&so->so_snd);
3610 tp->t_maxseg = mss;
3611
3612 SOCKBUF_LOCK(&so->so_rcv);
3613 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3614 bufsize = metrics.rmx_recvpipe;
3615 else
3616 bufsize = so->so_rcv.sb_hiwat;
3617 if (bufsize > mss) {
3618 bufsize = roundup(bufsize, mss);
3619 if (bufsize > sb_max)
3620 bufsize = sb_max;
3621 if (bufsize > so->so_rcv.sb_hiwat)
3622 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3623 }
3624 SOCKBUF_UNLOCK(&so->so_rcv);
3625
3626 /* Check the interface for TSO capabilities. */
3627 if (cap.ifcap & CSUM_TSO) {
3628 tp->t_flags |= TF_TSO;
3629 tp->t_tsomax = cap.tsomax;
3630 }
3631 }
3632 
3633 /*
3634 * Determine the MSS option to send on an outgoing SYN.
3635 */
3636 int
3637 tcp_mssopt(struct in_conninfo *inc)
3638 {
3639 int mss = 0;
3640 u_long maxmtu = 0;
3641 u_long thcmtu = 0;
3642 size_t min_protoh;
3643
3644 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3645
3646 #ifdef INET6
3647 if (inc->inc_flags & INC_ISIPV6) {
3648 mss = V_tcp_v6mssdflt;
3649 maxmtu = tcp_maxmtu6(inc, NULL);
3650 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3651 }
3652 #endif
3653 #if defined(INET) && defined(INET6)
3654 else
3655 #endif
3656 #ifdef INET
3657 {
3658 mss = V_tcp_mssdflt;
3659 maxmtu = tcp_maxmtu(inc, NULL);
3660 min_protoh = sizeof(struct tcpiphdr);
3661 }
3662 #endif
3663 #if defined(INET6) || defined(INET)
3664 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3665 #endif
3666
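/*
 * Use the smaller of the link MTU and the hostcache MTU when
 * both are known; if only one is nonzero, max() picks it.
 * Either way the fixed IP+TCP header overhead is subtracted.
 */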
3667 if (maxmtu && thcmtu)
3668 mss = min(maxmtu, thcmtu) - min_protoh;
3669 else if (maxmtu || thcmtu)
3670 mss = max(maxmtu, thcmtu) - min_protoh;
3671
3672 return (mss);
3673 }
3674 
3675 
3676 /*
3677 * When a partial ack arrives, force the retransmission of the
3678 * next unacknowledged segment. Do not clear tp->t_dupacks.
3679 * By setting snd_nxt to th_ack, this forces the retransmission
3680 * timer to be started again.
3681 */
3682 static void
3683 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3684 {
3685 tcp_seq onxt = tp->snd_nxt;
3686 u_long ocwnd = tp->snd_cwnd;
3687
3688 INP_WLOCK_ASSERT(tp->t_inpcb);
3689
3690 tcp_timer_activate(tp, TT_REXMT, 0);
3691 tp->t_rtttime = 0;
3692 tp->snd_nxt = th->th_ack;
3693 /*
3694 * Set snd_cwnd to one segment beyond acknowledged offset.
3695 * (tp->snd_una has not yet been updated when this function is called.)
3696 */
3697 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
3698 tp->t_flags |= TF_ACKNOW;
3699 (void) tcp_output(tp);
3700 tp->snd_cwnd = ocwnd;
3701 if (SEQ_GT(onxt, tp->snd_nxt))
3702 tp->snd_nxt = onxt;
3703 /*
3704 * Partial window deflation. Relies on fact that tp->snd_una
3705 * not updated yet.
3706 */
3707 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3708 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3709 else
3710 tp->snd_cwnd = 0;
3711 tp->snd_cwnd += tp->t_maxseg;
3712 }
3010 break;
3011
3012 /*
3013 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3014 * starting the time-wait timer, turning off the other
3015 * standard timers.
3016 */
3017 case TCPS_FIN_WAIT_2:
3018 INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
3019 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
3020 "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
3021 ti_locked));
3022
3023 tcp_twstart(tp);
3024 INP_INFO_WUNLOCK(&V_tcbinfo);
3025 return;
3026 }
3027 }
3028 if (ti_locked == TI_WLOCKED)
3029 INP_INFO_WUNLOCK(&V_tcbinfo);
3030 ti_locked = TI_UNLOCKED;
3031
3032#ifdef TCPDEBUG
3033 if (so->so_options & SO_DEBUG)
3034 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3035 &tcp_savetcp, 0);
3036#endif
3037
3038 /*
3039 * Return any desired output.
3040 */
3041 if (needoutput || (tp->t_flags & TF_ACKNOW))
3042 (void) tcp_output(tp);
3043
3044check_delack:
3045 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
3046 __func__, ti_locked));
3047 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3048 INP_WLOCK_ASSERT(tp->t_inpcb);
3049
3050 if (tp->t_flags & TF_DELACK) {
3051 tp->t_flags &= ~TF_DELACK;
3052 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
3053 }
3054 INP_WUNLOCK(tp->t_inpcb);
3055 return;
3056
3057dropafterack:
3058 /*
3059 * Generate an ACK dropping incoming segment if it occupies
3060 * sequence space, where the ACK reflects our state.
3061 *
3062 * We can now skip the test for the RST flag since all
3063 * paths to this code happen after packets containing
3064 * RST have been dropped.
3065 *
3066 * In the SYN-RECEIVED state, don't send an ACK unless the
3067 * segment we received passes the SYN-RECEIVED ACK test.
3068 * If it fails send a RST. This breaks the loop in the
3069 * "LAND" DoS attack, and also prevents an ACK storm
3070 * between two listening ports that have been sent forged
3071 * SYN segments, each with the source address of the other.
3072 */
3073 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3074 (SEQ_GT(tp->snd_una, th->th_ack) ||
3075 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3076 rstreason = BANDLIM_RST_OPENPORT;
3077 goto dropwithreset;
3078 }
3079#ifdef TCPDEBUG
3080 if (so->so_options & SO_DEBUG)
3081 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3082 &tcp_savetcp, 0);
3083#endif
3084 if (ti_locked == TI_WLOCKED)
3085 INP_INFO_WUNLOCK(&V_tcbinfo);
3086 ti_locked = TI_UNLOCKED;
3087
3088 tp->t_flags |= TF_ACKNOW;
3089 (void) tcp_output(tp);
3090 INP_WUNLOCK(tp->t_inpcb);
3091 m_freem(m);
3092 return;
3093
3094dropwithreset:
3095 if (ti_locked == TI_WLOCKED)
3096 INP_INFO_WUNLOCK(&V_tcbinfo);
3097 ti_locked = TI_UNLOCKED;
3098
3099 if (tp != NULL) {
3100 tcp_dropwithreset(m, th, tp, tlen, rstreason);
3101 INP_WUNLOCK(tp->t_inpcb);
3102 } else
3103 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
3104 return;
3105
3106drop:
3107 if (ti_locked == TI_WLOCKED) {
3108 INP_INFO_WUNLOCK(&V_tcbinfo);
3109 ti_locked = TI_UNLOCKED;
3110 }
3111#ifdef INVARIANTS
3112 else
3113 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
3114#endif
3115
3116 /*
3117 * Drop space held by incoming segment and return.
3118 */
3119#ifdef TCPDEBUG
3120 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3121 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3122 &tcp_savetcp, 0);
3123#endif
3124 if (tp != NULL)
3125 INP_WUNLOCK(tp->t_inpcb);
3126 m_freem(m);
3127}
3128
3129/*
3130 * Issue RST and make ACK acceptable to originator of segment.
3131 * The mbuf must still include the original packet header.
3132 * tp may be NULL.
3133 */
3134static void
3135tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
3136 int tlen, int rstreason)
3137{
3138#ifdef INET
3139 struct ip *ip;
3140#endif
3141#ifdef INET6
3142 struct ip6_hdr *ip6;
3143#endif
3144
3145 if (tp != NULL) {
3146 INP_WLOCK_ASSERT(tp->t_inpcb);
3147 }
3148
3149 /* Don't bother if destination was broadcast/multicast. */
3150 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3151 goto drop;
3152#ifdef INET6
3153 if (mtod(m, struct ip *)->ip_v == 6) {
3154 ip6 = mtod(m, struct ip6_hdr *);
3155 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3156 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3157 goto drop;
3158 /* IPv6 anycast check is done at tcp6_input() */
3159 }
3160#endif
3161#if defined(INET) && defined(INET6)
3162 else
3163#endif
3164#ifdef INET
3165 {
3166 ip = mtod(m, struct ip *);
3167 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3168 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3169 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3170 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3171 goto drop;
3172 }
3173#endif
3174
3175 /* Perform bandwidth limiting. */
3176 if (badport_bandlim(rstreason) < 0)
3177 goto drop;
3178
3179 /* tcp_respond consumes the mbuf chain. */
3180 if (th->th_flags & TH_ACK) {
3181 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
3182 th->th_ack, TH_RST);
3183 } else {
3184 if (th->th_flags & TH_SYN)
3185 tlen++;
3186 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3187 (tcp_seq)0, TH_RST|TH_ACK);
3188 }
3189 return;
3190drop:
3191 m_freem(m);
3192}
3193
3194/*
3195 * Parse TCP options and place in tcpopt.
3196 */
3197static void
3198tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
3199{
3200 int opt, optlen;
3201
3202 to->to_flags = 0;
3203 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3204 opt = cp[0];
3205 if (opt == TCPOPT_EOL)
3206 break;
3207 if (opt == TCPOPT_NOP)
3208 optlen = 1;
3209 else {
3210 if (cnt < 2)
3211 break;
3212 optlen = cp[1];
3213 if (optlen < 2 || optlen > cnt)
3214 break;
3215 }
3216 switch (opt) {
3217 case TCPOPT_MAXSEG:
3218 if (optlen != TCPOLEN_MAXSEG)
3219 continue;
3220 if (!(flags & TO_SYN))
3221 continue;
3222 to->to_flags |= TOF_MSS;
3223 bcopy((char *)cp + 2,
3224 (char *)&to->to_mss, sizeof(to->to_mss));
3225 to->to_mss = ntohs(to->to_mss);
3226 break;
3227 case TCPOPT_WINDOW:
3228 if (optlen != TCPOLEN_WINDOW)
3229 continue;
3230 if (!(flags & TO_SYN))
3231 continue;
3232 to->to_flags |= TOF_SCALE;
3233 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
3234 break;
3235 case TCPOPT_TIMESTAMP:
3236 if (optlen != TCPOLEN_TIMESTAMP)
3237 continue;
3238 to->to_flags |= TOF_TS;
3239 bcopy((char *)cp + 2,
3240 (char *)&to->to_tsval, sizeof(to->to_tsval));
3241 to->to_tsval = ntohl(to->to_tsval);
3242 bcopy((char *)cp + 6,
3243 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
3244 to->to_tsecr = ntohl(to->to_tsecr);
3245 break;
3246#ifdef TCP_SIGNATURE
3247 /*
3248 * XXX In order to reply to a host which has set the
3249 * TCP_SIGNATURE option in its initial SYN, we have to
3250 * record the fact that the option was observed here
3251 * for the syncache code to perform the correct response.
3252 */
3253 case TCPOPT_SIGNATURE:
3254 if (optlen != TCPOLEN_SIGNATURE)
3255 continue;
3256 to->to_flags |= TOF_SIGNATURE;
3257 to->to_signature = cp + 2;
3258 break;
3259#endif
3260 case TCPOPT_SACK_PERMITTED:
3261 if (optlen != TCPOLEN_SACK_PERMITTED)
3262 continue;
3263 if (!(flags & TO_SYN))
3264 continue;
3265 if (!V_tcp_do_sack)
3266 continue;
3267 to->to_flags |= TOF_SACKPERM;
3268 break;
3269 case TCPOPT_SACK:
3270 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
3271 continue;
3272 if (flags & TO_SYN)
3273 continue;
3274 to->to_flags |= TOF_SACK;
3275 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
3276 to->to_sacks = cp + 2;
3277 TCPSTAT_INC(tcps_sack_rcv_blocks);
3278 break;
3279 default:
3280 continue;
3281 }
3282 }
3283}
3284
3285/*
3286 * Pull out of band byte out of a segment so
3287 * it doesn't appear in the user's data queue.
3288 * It is still reflected in the segment length for
3289 * sequencing purposes.
3290 */
3291static void
3292tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
3293 int off)
3294{
3295 int cnt = off + th->th_urp - 1;
3296
3297 while (cnt >= 0) {
3298 if (m->m_len > cnt) {
3299 char *cp = mtod(m, caddr_t) + cnt;
3300 struct tcpcb *tp = sototcpcb(so);
3301
3302 INP_WLOCK_ASSERT(tp->t_inpcb);
3303
3304 tp->t_iobc = *cp;
3305 tp->t_oobflags |= TCPOOB_HAVEDATA;
3306 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
3307 m->m_len--;
3308 if (m->m_flags & M_PKTHDR)
3309 m->m_pkthdr.len--;
3310 return;
3311 }
3312 cnt -= m->m_len;
3313 m = m->m_next;
3314 if (m == NULL)
3315 break;
3316 }
3317 panic("tcp_pulloutofband");
3318}
3319
3320/*
3321 * Collect new round-trip time estimate
3322 * and update averages and current timeout.
3323 */
3324static void
3325tcp_xmit_timer(struct tcpcb *tp, int rtt)
3326{
3327 int delta;
3328
3329 INP_WLOCK_ASSERT(tp->t_inpcb);
3330
3331 TCPSTAT_INC(tcps_rttupdated);
3332 tp->t_rttupdated++;
3333 if (tp->t_srtt != 0) {
3334 /*
3335 * srtt is stored as fixed point with 5 bits after the
3336 * binary point (i.e., scaled by 8). The following magic
3337 * is equivalent to the smoothing algorithm in rfc793 with
3338 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
3339 * point). Adjust rtt to origin 0.
3340 */
3341 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3342 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3343
3344 if ((tp->t_srtt += delta) <= 0)
3345 tp->t_srtt = 1;
3346
3347 /*
3348 * We accumulate a smoothed rtt variance (actually, a
3349 * smoothed mean difference), then set the retransmit
3350 * timer to smoothed rtt + 4 times the smoothed variance.
3351 * rttvar is stored as fixed point with 4 bits after the
3352 * binary point (scaled by 16). The following is
3353 * equivalent to rfc793 smoothing with an alpha of .75
3354 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
3355 * rfc793's wired-in beta.
3356 */
3357 if (delta < 0)
3358 delta = -delta;
3359 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3360 if ((tp->t_rttvar += delta) <= 0)
3361 tp->t_rttvar = 1;
3362 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3363 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3364 } else {
3365 /*
3366 * No rtt measurement yet - use the unsmoothed rtt.
3367 * Set the variance to half the rtt (so our first
3368 * retransmit happens at 3*rtt).
3369 */
3370 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3371 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3372 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3373 }
3374 tp->t_rtttime = 0;
3375 tp->t_rxtshift = 0;
3376
3377 /*
3378 * the retransmit should happen at rtt + 4 * rttvar.
3379 * Because of the way we do the smoothing, srtt and rttvar
3380 * will each average +1/2 tick of bias. When we compute
3381 * the retransmit timer, we want 1/2 tick of rounding and
3382 * 1 extra tick because of +-1/2 tick uncertainty in the
3383 * firing of the timer. The bias will give us exactly the
3384 * 1.5 tick we need. But, because the bias is
3385 * statistical, we have to test that we don't drop below
3386 * the minimum feasible timer (which is 2 ticks).
3387 */
3388 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3389 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
3390
3391 /*
3392 * We received an ack for a packet that wasn't retransmitted;
3393 * it is probably safe to discard any error indications we've
3394 * received recently. This isn't quite right, but close enough
3395 * for now (a route might have failed after we sent a segment,
3396 * and the return path might not be symmetrical).
3397 */
3398 tp->t_softerror = 0;
3399}
3400
3401/*
3402 * Determine a reasonable value for maxseg size.
3403 * If the route is known, check route for mtu.
3404 * If none, use an mss that can be handled on the outgoing interface
3405 * without forcing IP to fragment. If no route is found, route has no mtu,
3406 * or the destination isn't local, use a default, hopefully conservative
3407 * size (usually 512 or the default IP max size, but no more than the mtu
3408 * of the interface), as we can't discover anything about intervening
3409 * gateways or networks. We also initialize the congestion/slow start
3410 * window to be a single segment if the destination isn't local.
3411 * While looking at the routing entry, we also initialize other path-dependent
3412 * parameters from pre-set or cached values in the routing entry.
3413 *
3414 * Also take into account the space needed for options that we
3415 * send regularly. Make maxseg shorter by that amount to assure
3416 * that we can send maxseg amount of data even when the options
3417 * are present. Store the upper limit of the length of options plus
3418 * data in maxopd.
3419 *
3420 * NOTE that this routine is only called when we process an incoming
3421 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS
3422 * settings are handled in tcp_mssopt().
3423 */
3424void
3425tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
3426 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap)
3427{
3428 int mss = 0;
3429 u_long maxmtu = 0;
3430 struct inpcb *inp = tp->t_inpcb;
3431 struct hc_metrics_lite metrics;
3432 int origoffer;
3433#ifdef INET6
3434 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
3435 size_t min_protoh = isipv6 ?
3436 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
3437 sizeof (struct tcpiphdr);
3438#else
3439 const size_t min_protoh = sizeof(struct tcpiphdr);
3440#endif
3441
3442 INP_WLOCK_ASSERT(tp->t_inpcb);
3443
3444 if (mtuoffer != -1) {
3445 KASSERT(offer == -1, ("%s: conflict", __func__));
3446 offer = mtuoffer - min_protoh;
3447 }
3448 origoffer = offer;
3449
3450 /* Initialize. */
3451#ifdef INET6
3452 if (isipv6) {
3453 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap);
3454 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
3455 }
3456#endif
3457#if defined(INET) && defined(INET6)
3458 else
3459#endif
3460#ifdef INET
3461 {
3462 maxmtu = tcp_maxmtu(&inp->inp_inc, cap);
3463 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
3464 }
3465#endif
3466
3467 /*
3468 * No route to sender, stay with default mss and return.
3469 */
3470 if (maxmtu == 0) {
3471 /*
3472 * In case we return early we need to initialize metrics
3473 * to a defined state as tcp_hc_get() would do for us
3474 * if there was no cache hit.
3475 */
3476 if (metricptr != NULL)
3477 bzero(metricptr, sizeof(struct hc_metrics_lite));
3478 return;
3479 }
3480
3481 /* What have we got? */
3482 switch (offer) {
3483 case 0:
3484 /*
3485 * Offer == 0 means that there was no MSS on the SYN
3486 * segment, in this case we use tcp_mssdflt as
3487 * already assigned to t_maxopd above.
3488 */
3489 offer = tp->t_maxopd;
3490 break;
3491
3492 case -1:
3493 /*
3494 * Offer == -1 means that we didn't receive SYN yet.
3495 */
3496 /* FALLTHROUGH */
3497
3498 default:
3499 /*
3500 * Prevent DoS attack with too small MSS. Round up
3501 * to at least minmss.
3502 */
3503 offer = max(offer, V_tcp_minmss);
3504 }
3505
3506 /*
3507 * rmx information is now retrieved from tcp_hostcache.
3508 */
3509 tcp_hc_get(&inp->inp_inc, &metrics);
3510 if (metricptr != NULL)
3511 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
3512
3513 /*
3514 * If there's a discovered mtu int tcp hostcache, use it
3515 * else, use the link mtu.
3516 */
3517 if (metrics.rmx_mtu)
3518 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
3519 else {
3520#ifdef INET6
3521 if (isipv6) {
3522 mss = maxmtu - min_protoh;
3523 if (!V_path_mtu_discovery &&
3524 !in6_localaddr(&inp->in6p_faddr))
3525 mss = min(mss, V_tcp_v6mssdflt);
3526 }
3527#endif
3528#if defined(INET) && defined(INET6)
3529 else
3530#endif
3531#ifdef INET
3532 {
3533 mss = maxmtu - min_protoh;
3534 if (!V_path_mtu_discovery &&
3535 !in_localaddr(inp->inp_faddr))
3536 mss = min(mss, V_tcp_mssdflt);
3537 }
3538#endif
3539 /*
3540 * XXX - The above conditional (mss = maxmtu - min_protoh)
3541 * probably violates the TCP spec.
3542 * The problem is that, since we don't know the
3543 * other end's MSS, we are supposed to use a conservative
3544 * default. But, if we do that, then MTU discovery will
3545 * never actually take place, because the conservative
3546 * default is much less than the MTUs typically seen
3547 * on the Internet today. For the moment, we'll sweep
3548 * this under the carpet.
3549 *
3550 * The conservative default might not actually be a problem
3551 * if the only case this occurs is when sending an initial
3552 * SYN with options and data to a host we've never talked
3553 * to before. Then, they will reply with an MSS value which
3554 * will get recorded and the new parameters should get
3555 * recomputed. For Further Study.
3556 */
3557 }
3558 mss = min(mss, offer);
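	/*
	 * Worked example with assumed values: on an Ethernet path with an
	 * MTU of 1500 and no hostcache entry, mss = 1500 - min_protoh,
	 * i.e. 1460 for IPv4 or 1440 for IPv6; a smaller MSS offered by
	 * the peer (say, 1400) would then win the min() above.
	 */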
3559
3560 /*
3561 * Sanity check: make sure that maxopd will be large
3562	 * enough to allow some data on segments even if
3563	 * all the option space is used (40 bytes). Otherwise
3564 * funny things may happen in tcp_output.
3565 */
3566 mss = max(mss, 64);
3567
3568 /*
3569 * maxopd stores the maximum length of data AND options
3570 * in a segment; maxseg is the amount of data in a normal
3571 * segment. We need to store this value (maxopd) apart
3572 * from maxseg, because now every segment carries options
3573 * and thus we normally have somewhat less data in segments.
3574 */
3575 tp->t_maxopd = mss;
3576
3577 /*
3578 * origoffer==-1 indicates that no segments were received yet.
3579 * In this case we just guess.
3580 */
3581 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3582 (origoffer == -1 ||
3583 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3584 mss -= TCPOLEN_TSTAMP_APPA;
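	/*
	 * TCPOLEN_TSTAMP_APPA is 12 bytes: the 10-byte timestamp option
	 * padded to a 4-byte boundary per RFC 1323, Appendix A. E.g. an
	 * IPv4 Ethernet mss of 1460 becomes 1448 once both sides have
	 * agreed to use timestamps.
	 */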
3585
3586 tp->t_maxseg = mss;
3587}
3588
3589void
3590tcp_mss(struct tcpcb *tp, int offer)
3591{
3592 int mss;
3593 u_long bufsize;
3594 struct inpcb *inp;
3595 struct socket *so;
3596 struct hc_metrics_lite metrics;
3597 struct tcp_ifcap cap;
3598
3599 KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
3600
3601 bzero(&cap, sizeof(cap));
3602 tcp_mss_update(tp, offer, -1, &metrics, &cap);
3603
3604 mss = tp->t_maxseg;
3605 inp = tp->t_inpcb;
3606
3607 /*
3608	 * If there's a pipesize, change the socket buffer to that size;
3609	 * don't change it if sb_hiwat differs from the default (then it
3610	 * has been changed on purpose with setsockopt).
3611 * Make the socket buffers an integral number of mss units;
3612 * if the mss is larger than the socket buffer, decrease the mss.
3613 */
3614 so = inp->inp_socket;
3615 SOCKBUF_LOCK(&so->so_snd);
3616 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
3617 bufsize = metrics.rmx_sendpipe;
3618 else
3619 bufsize = so->so_snd.sb_hiwat;
3620 if (bufsize < mss)
3621 mss = bufsize;
3622 else {
3623 bufsize = roundup(bufsize, mss);
3624 if (bufsize > sb_max)
3625 bufsize = sb_max;
3626 if (bufsize > so->so_snd.sb_hiwat)
3627 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3628 }
3629 SOCKBUF_UNLOCK(&so->so_snd);
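	/*
	 * Example of the rounding above, with assumed values: a
	 * 32768-byte send buffer and an mss of 1460 round up to
	 * 23 * 1460 = 33580 bytes, provided that stays under the global
	 * sb_max limit.
	 */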
3630 tp->t_maxseg = mss;
3631
3632 SOCKBUF_LOCK(&so->so_rcv);
3633 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
3634 bufsize = metrics.rmx_recvpipe;
3635 else
3636 bufsize = so->so_rcv.sb_hiwat;
3637 if (bufsize > mss) {
3638 bufsize = roundup(bufsize, mss);
3639 if (bufsize > sb_max)
3640 bufsize = sb_max;
3641 if (bufsize > so->so_rcv.sb_hiwat)
3642 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3643 }
3644 SOCKBUF_UNLOCK(&so->so_rcv);
3645
3646 /* Check the interface for TSO capabilities. */
3647 if (cap.ifcap & CSUM_TSO) {
3648 tp->t_flags |= TF_TSO;
3649 tp->t_tsomax = cap.tsomax;
3650 }
3651}
3652
3653/*
3654 * Determine the MSS option to send on an outgoing SYN.
3655 */
3656int
3657tcp_mssopt(struct in_conninfo *inc)
3658{
3659 int mss = 0;
3660 u_long maxmtu = 0;
3661 u_long thcmtu = 0;
3662 size_t min_protoh;
3663
3664 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3665
3666#ifdef INET6
3667 if (inc->inc_flags & INC_ISIPV6) {
3668 mss = V_tcp_v6mssdflt;
3669 maxmtu = tcp_maxmtu6(inc, NULL);
3670 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3671 }
3672#endif
3673#if defined(INET) && defined(INET6)
3674 else
3675#endif
3676#ifdef INET
3677 {
3678 mss = V_tcp_mssdflt;
3679 maxmtu = tcp_maxmtu(inc, NULL);
3680 min_protoh = sizeof(struct tcpiphdr);
3681 }
3682#endif
3683#if defined(INET6) || defined(INET)
3684 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3685#endif
3686
3687 if (maxmtu && thcmtu)
3688 mss = min(maxmtu, thcmtu) - min_protoh;
3689 else if (maxmtu || thcmtu)
3690 mss = max(maxmtu, thcmtu) - min_protoh;
3691
3692 return (mss);
3693}
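
/*
 * A note on the min()/max() pair above: when both the interface MTU and a
 * cached path MTU are known, the smaller (safer) value wins; when only one
 * is known, the other is 0 and max() simply picks the known value.  For
 * example, maxmtu = 1500 with thcmtu = 0 yields mss = 1500 - min_protoh.
 */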
3694
3696/*
3697 * When a partial ack arrives, force the retransmission of the
3698 * next unacknowledged segment. Do not clear tp->t_dupacks.
3699 * By setting snd_nxt to th_ack, this forces the retransmission timer
3700 * to be started again.
3701 */
3702static void
3703tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3704{
3705 tcp_seq onxt = tp->snd_nxt;
3706 u_long ocwnd = tp->snd_cwnd;
3707
3708 INP_WLOCK_ASSERT(tp->t_inpcb);
3709
3710 tcp_timer_activate(tp, TT_REXMT, 0);
3711 tp->t_rtttime = 0;
3712 tp->snd_nxt = th->th_ack;
3713 /*
3714 * Set snd_cwnd to one segment beyond acknowledged offset.
3715 * (tp->snd_una has not yet been updated when this function is called.)
3716 */
3717 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th);
3718 tp->t_flags |= TF_ACKNOW;
3719 (void) tcp_output(tp);
3720 tp->snd_cwnd = ocwnd;
3721 if (SEQ_GT(onxt, tp->snd_nxt))
3722 tp->snd_nxt = onxt;
3723 /*
3724	 * Partial window deflation. Relies on the fact that tp->snd_una
3725	 * has not been updated yet.
3726 */
3727 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
3728 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
3729 else
3730 tp->snd_cwnd = 0;
3731 tp->snd_cwnd += tp->t_maxseg;
3732}
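
/*
 * Worked example with assumed values: let t_maxseg = 1460, snd_cwnd =
 * 8 * 1460 = 11680, and a partial ack cover 2 * 1460 = 2920 new bytes.
 * One segment is retransmitted with a temporary cwnd of 1460 + 2920,
 * cwnd is then restored, and the deflation above leaves snd_cwnd at
 * 11680 - 2920 + 1460 = 10220: roughly one new segment may be sent per
 * partial ack, as in RFC 6582-style NewReno recovery.
 */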