/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/tcp_timewait.c 221250 2011-04-30 11:21:29Z bz $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static VNET_DEFINE(uma_zone_t, tcptw_zone);
#define	V_tcptw_zone			VNET(tcptw_zone)
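/*
 * Upper limit on the number of compressed TIME_WAIT entries.  A value of
 * zero (the default) selects an automatic limit derived from the ephemeral
 * port range by tcptw_auto_size().
 */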
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * tcbinfo lock, which must be held over queue iteration and modification.
 */
static VNET_DEFINE(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl			VNET(twq_2msl)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *);

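/*
 * Derive a default limit on compressed TIME_WAIT entries from half of the
 * ephemeral port range, clamped to at least 32 and at most maxsockets / 5.
 */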
static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

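/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the effective limit
 * (the automatic value when maxtcptw is zero) and, on a write, applies any
 * new limit of at least 32 to the tcptw zone.
 */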
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE(int, nolocaltimewait) = 0;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

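/*
 * Re-apply the automatic zone limit when no explicit maxtcptw has been
 * set, e.g. after the inputs of tcptw_auto_size() change.
 */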
void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

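/*
 * Initialize the TIME_WAIT subsystem: create the tcptw zone, apply the
 * tuned or automatic zone limit, and initialize the 2MSL queue.
 */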
void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
}

#ifdef VIMAGE
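/*
 * Tear down TIME_WAIT state: close any entries remaining on the 2MSL
 * queue and destroy the tcptw zone.
 */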
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;

	INP_INFO_WLOCK(&V_tcbinfo);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	INP_INFO_WUNLOCK(&V_tcbinfo);

	uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;
#ifdef INET6
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_reset(). */
	INP_WLOCK_ASSERT(inp);

	if (V_nolocaltimewait) {
		int error = 0;
#ifdef INET6
		if (isipv6)
			error = in6_localaddr(&inp->in6p_faddr);
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET
			error = in_localip(inp->inp_faddr);
#endif
		if (error) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}

	tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss     = tp->iss;
	tw->irs     = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever used for the FIN_WAIT_2 state as well,
	 * we may need the ts_recent value from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: the soisdisconnected() call used to be made in
	 * tcp_discardcb() and might no longer be needed here.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_flags |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

#if 0
/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
#endif

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	/* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcbs whose timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *      RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
drop:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}

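/*
 * Tear down a TIME_WAIT connection: remove it from the 2MSL queue, drop
 * the inpcb, and release or free the associated socket as appropriate.
 * With 'reuse' set, the tcptw structure is not freed so that the caller
 * may recycle it.
 */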
void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_stop(). */
	INP_WLOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: either we own a
		 * strong reference, which we now release, or we don't, in
		 * which case another reference still exists (XXXRW: think
		 * about this more) and no action is needed.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else
		in_pcbfree(inp);
	TCPSTAT_INC(tcps_closed);
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(V_tcptw_zone, tw);
}

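/*
 * Send an ACK (or other control segment indicated by 'flags') on behalf
 * of a compressed TIME_WAIT entry, using the sequence numbers, window and
 * timestamp state recorded in the tcptw structure.
 */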
int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not an RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = ticks + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		if (V_path_mtu_discovery)
			ip->ip_off |= IP_DF;
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}

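/*
 * (Re)start the 2MSL timer for a TIME_WAIT entry and move it to the tail
 * of the queue, which stays ordered by expiry time.
 */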
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tw->tw_inpcb);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
}

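/*
 * Remove a TIME_WAIT entry from the 2MSL queue.
 */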
static void
tcp_tw_2msl_stop(struct tcptw *tw)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
}

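/*
 * Scan the 2MSL queue from the head, closing expired entries.  With
 * 'reuse' set, close the first entry unconditionally and return it so
 * that the caller can recycle the tcptw structure.
 */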
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	for (;;) {
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0))
			break;
		INP_WLOCK(tw->tw_inpcb);
		tcp_twclose(tw, reuse);
		if (reuse)
			return (tw);
	}
	return (NULL);
}