/* tcp_timer.c, FreeBSD SVN revision 242263 */
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/netinet/tcp_timer.c 242263 2012-10-28 19:20:23Z andre $");
34
35#include "opt_inet6.h"
36#include "opt_tcpdebug.h"
37
38#include <sys/param.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/mbuf.h>
42#include <sys/mutex.h>
43#include <sys/protosw.h>
44#include <sys/smp.h>
45#include <sys/socket.h>
46#include <sys/socketvar.h>
47#include <sys/sysctl.h>
48#include <sys/systm.h>
49
50#include <net/if.h>
51#include <net/route.h>
52#include <net/vnet.h>
53
54#include <netinet/cc.h>
55#include <netinet/in.h>
56#include <netinet/in_pcb.h>
57#include <netinet/in_systm.h>
58#ifdef INET6
59#include <netinet6/in6_pcb.h>
60#endif
61#include <netinet/ip_var.h>
62#include <netinet/tcp_fsm.h>
63#include <netinet/tcp_timer.h>
64#include <netinet/tcp_var.h>
65#include <netinet/tcpip.h>
66#ifdef TCPDEBUG
67#include <netinet/tcp_debug.h>
68#endif
69
/*
 * Timer tunables.  Each SYSCTL_PROC below accepts a value in milliseconds
 * from userland and stores it in ticks via sysctl_msec_to_ticks.
 */

/* Time allowed to establish a connection (SYN handshake timeout). */
int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "time to establish connection");

/* Idle time before the first keepalive probe is sent. */
int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "time before keepalive probes begin");

/* Interval between successive keepalive probes. */
int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "time between keepalive probes");

/* How long an ACK may be delayed before it is forced out. */
int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

/* Maximum segment lifetime; TIME_WAIT lasts 2*MSL. */
int	tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

/* Lower bound on the retransmission timeout. */
int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

/* Slop added to computed retransmission timeouts. */
int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

/* If non-zero, behave as if SO_KEEPALIVE were set on every connection. */
static int	always_keepalive = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
    &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");

/* If set, reap FIN_WAIT_2 connections whose receive side is shut down. */
int    tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

/* Timeout used when fast FIN_WAIT_2 recycling is enabled. */
int    tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", "FIN-WAIT2 timeout");

/* Number of keepalive probes sent before the connection is dropped. */
int	tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

	/* Maximum idle time while persist probes are outstanding. */
int	tcp_maxpersistidle;

/* If set, run each connection's timers on a flowid-selected CPU. */
static int	per_cpu_timers = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers , 0, "run tcp timers on all cpus");

/*
 * CPU on which a connection's timer callouts run: hashed from the inpcb
 * flowid when per_cpu_timers is enabled (falling back to the current CPU
 * if the hashed one is absent), otherwise always CPU 0.
 */
#define	INP_CPU(inp)	(per_cpu_timers ? (!CPU_ABSENT(((inp)->inp_flowid % (mp_maxid+1))) ? \
		((inp)->inp_flowid % (mp_maxid+1)) : curcpu) : 0)
127
/*
 * Tcp protocol timeout routine called every 500 ms.
 * Updates timestamps used for TCP
 * causes finite state machine actions if timers expire.
 */
void
tcp_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	/* Walk every vnet and scan its TIME_WAIT list for expired entries. */
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		/* tcp_tw_2msl_scan() requires the pcbinfo write lock. */
		INP_INFO_WLOCK(&V_tcbinfo);
		(void) tcp_tw_2msl_scan(0);
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
148
/* RTO backoff multipliers for retransmitted SYNs, indexed by t_rxtshift. */
int	tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

/* Exponential RTO backoff multipliers for established connections. */
int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

static int tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */

/* Counts timer callouts that found t_inpcb already cleared (see XXXRW). */
static int tcp_timer_race;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_race, CTLFLAG_RD, &tcp_timer_race,
    0, "Count of t_inpcb races on tcp_discardcb");
160
161/*
162 * TCP timer processing.
163 */
164
/*
 * Delayed-ACK timer.  When it fires, force an immediate ACK by setting
 * TF_ACKNOW and calling tcp_output().  Runs as a callout with the tcpcb
 * as its argument; must re-validate connection state under the inpcb
 * lock because the callout may race with connection teardown.
 */
void
tcp_timer_delack(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);

	inp = tp->t_inpcb;
	/*
	 * XXXRW: While this assert is in fact correct, bugs in the tcpcb
	 * tear-down mean we need it as a work-around for races between
	 * timers and tcp_discardcb().
	 *
	 * KASSERT(inp != NULL, ("tcp_timer_delack: inp == NULL"));
	 */
	if (inp == NULL) {
		tcp_timer_race++;
		CURVNET_RESTORE();
		return;
	}
	INP_WLOCK(inp);
	/* Timer was rescheduled or stopped while we waited for the lock. */
	if (callout_pending(&tp->t_timers->tt_delack) ||
	    !callout_active(&tp->t_timers->tt_delack)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_delack);
	/* Connection was dropped after the callout was dispatched. */
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}

	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	(void) tcp_output(tp);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
}
205
206void
207tcp_timer_2msl(void *xtp)
208{
209	struct tcpcb *tp = xtp;
210	struct inpcb *inp;
211	CURVNET_SET(tp->t_vnet);
212#ifdef TCPDEBUG
213	int ostate;
214
215	ostate = tp->t_state;
216#endif
217	/*
218	 * XXXRW: Does this actually happen?
219	 */
220	INP_INFO_WLOCK(&V_tcbinfo);
221	inp = tp->t_inpcb;
222	/*
223	 * XXXRW: While this assert is in fact correct, bugs in the tcpcb
224	 * tear-down mean we need it as a work-around for races between
225	 * timers and tcp_discardcb().
226	 *
227	 * KASSERT(inp != NULL, ("tcp_timer_2msl: inp == NULL"));
228	 */
229	if (inp == NULL) {
230		tcp_timer_race++;
231		INP_INFO_WUNLOCK(&V_tcbinfo);
232		CURVNET_RESTORE();
233		return;
234	}
235	INP_WLOCK(inp);
236	tcp_free_sackholes(tp);
237	if (callout_pending(&tp->t_timers->tt_2msl) ||
238	    !callout_active(&tp->t_timers->tt_2msl)) {
239		INP_WUNLOCK(tp->t_inpcb);
240		INP_INFO_WUNLOCK(&V_tcbinfo);
241		CURVNET_RESTORE();
242		return;
243	}
244	callout_deactivate(&tp->t_timers->tt_2msl);
245	if ((inp->inp_flags & INP_DROPPED) != 0) {
246		INP_WUNLOCK(inp);
247		INP_INFO_WUNLOCK(&V_tcbinfo);
248		CURVNET_RESTORE();
249		return;
250	}
251	/*
252	 * 2 MSL timeout in shutdown went off.  If we're closed but
253	 * still waiting for peer to close and connection has been idle
254	 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
255	 * control block.  Otherwise, check again in a bit.
256	 *
257	 * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed,
258	 * there's no point in hanging onto FIN_WAIT_2 socket. Just close it.
259	 * Ignore fact that there were recent incoming segments.
260	 */
261	if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
262	    tp->t_inpcb && tp->t_inpcb->inp_socket &&
263	    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
264		TCPSTAT_INC(tcps_finwait2_drops);
265		tp = tcp_close(tp);
266	} else {
267		if (tp->t_state != TCPS_TIME_WAIT &&
268		   ticks - tp->t_rcvtime <= TP_MAXIDLE(tp))
269		       callout_reset_on(&tp->t_timers->tt_2msl,
270			   TP_KEEPINTVL(tp), tcp_timer_2msl, tp, INP_CPU(inp));
271	       else
272		       tp = tcp_close(tp);
273       }
274
275#ifdef TCPDEBUG
276	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
277		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
278			  PRU_SLOWTIMO);
279#endif
280	if (tp != NULL)
281		INP_WUNLOCK(inp);
282	INP_INFO_WUNLOCK(&V_tcbinfo);
283	CURVNET_RESTORE();
284}
285
/*
 * Keepalive timer.  If keepalives are enabled (SO_KEEPALIVE or the global
 * always_keepalive) and the connection is established, send a probe that
 * forces the peer to respond; drop the connection once it has been idle
 * past TP_KEEPIDLE + TP_MAXIDLE ticks or never left the handshake.
 */
void
tcp_timer_keep(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	/* Write lock on the pcbinfo: the dropit path calls tcp_drop(). */
	INP_INFO_WLOCK(&V_tcbinfo);
	inp = tp->t_inpcb;
	/*
	 * XXXRW: While this assert is in fact correct, bugs in the tcpcb
	 * tear-down mean we need it as a work-around for races between
	 * timers and tcp_discardcb().
	 *
	 * KASSERT(inp != NULL, ("tcp_timer_keep: inp == NULL"));
	 */
	if (inp == NULL) {
		tcp_timer_race++;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	INP_WLOCK(inp);
	/* Timer was rescheduled or stopped while we waited for the locks. */
	if (callout_pending(&tp->t_timers->tt_keep) ||
	    !callout_active(&tp->t_timers->tt_keep)) {
		INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_keep);
	/* Connection was dropped after the callout was dispatched. */
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	/* Still in the handshake: the connection never came up, drop it. */
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			tcp_respond(tp, t_template->tt_ipgen,
				    &t_template->tt_t, (struct mbuf *)NULL,
				    tp->rcv_nxt, tp->snd_una - 1, 0);
			free(t_template, M_TEMP);
		}
		/* Probing: re-arm with the shorter inter-probe interval. */
		callout_reset_on(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
		    tcp_timer_keep, tp, INP_CPU(inp));
	} else
		/* Not probing: re-arm for a full idle period. */
		callout_reset_on(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
		    tcp_timer_keep, tp, INP_CPU(inp));

#ifdef TCPDEBUG
	if (inp->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
	return;

dropit:
	/* Peer is unresponsive (or handshake stalled): drop the connection. */
	TCPSTAT_INC(tcps_keepdrops);
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	/* tcp_drop() returning NULL means the inpcb lock is already gone. */
	if (tp != NULL)
		INP_WUNLOCK(tp->t_inpcb);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
}
389
/*
 * Persist timer.  The send window has been zero for too long: force a
 * single byte out (TF_FORCEDATA) to solicit a window update, or drop the
 * connection entirely if the peer has stopped responding to probes.
 */
void
tcp_timer_persist(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	/* Write lock on the pcbinfo: the drop path calls tcp_drop(). */
	INP_INFO_WLOCK(&V_tcbinfo);
	inp = tp->t_inpcb;
	/*
	 * XXXRW: While this assert is in fact correct, bugs in the tcpcb
	 * tear-down mean we need it as a work-around for races between
	 * timers and tcp_discardcb().
	 *
	 * KASSERT(inp != NULL, ("tcp_timer_persist: inp == NULL"));
	 */
	if (inp == NULL) {
		tcp_timer_race++;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	INP_WLOCK(inp);
	/* Timer was rescheduled or stopped while we waited for the locks. */
	if (callout_pending(&tp->t_timers->tt_persist) ||
	    !callout_active(&tp->t_timers->tt_persist)) {
		INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_persist);
	/* Connection was dropped after the callout was dispatched. */
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		INP_INFO_WUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	     ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
		TCPSTAT_INC(tcps_persistdrop);
		tp = tcp_drop(tp, ETIMEDOUT);
		goto out;
	}
	/* Re-arm the persist timer, then force one byte past the window. */
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	(void) tcp_output(tp);
	tp->t_flags &= ~TF_FORCEDATA;

out:
#ifdef TCPDEBUG
	if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
	/* tcp_drop() returning NULL means the inpcb lock is already gone. */
	if (tp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
}
465
/*
 * Retransmission timer.  No ACK arrived within the retransmit interval:
 * back the RTO off exponentially and retransmit one segment, or drop the
 * connection once TCP_MAXRXTSHIFT consecutive timeouts have occurred.
 * Normally runs with only the pcbinfo read lock; the drop path upgrades
 * to the write lock (see comment at the lock dance below).
 */
void
tcp_timer_rexmt(void * xtp)
{
	struct tcpcb *tp = xtp;
	CURVNET_SET(tp->t_vnet);
	int rexmt;
	int headlocked;		/* non-zero iff pcbinfo write lock held at out: */
	struct inpcb *inp;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	INP_INFO_RLOCK(&V_tcbinfo);
	inp = tp->t_inpcb;
	/*
	 * XXXRW: While this assert is in fact correct, bugs in the tcpcb
	 * tear-down mean we need it as a work-around for races between
	 * timers and tcp_discardcb().
	 *
	 * KASSERT(inp != NULL, ("tcp_timer_rexmt: inp == NULL"));
	 */
	if (inp == NULL) {
		tcp_timer_race++;
		INP_INFO_RUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	INP_WLOCK(inp);
	/* Timer was rescheduled or stopped while we waited for the locks. */
	if (callout_pending(&tp->t_timers->tt_rexmt) ||
	    !callout_active(&tp->t_timers->tt_rexmt)) {
		INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_rexmt);
	/* Connection was dropped after the callout was dispatched. */
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();
		return;
	}
	tcp_free_sackholes(tp);
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		TCPSTAT_INC(tcps_timeoutdrop);
		/*
		 * tcp_drop() needs the pcbinfo WRITE lock, but a read lock
		 * cannot be upgraded in place.  Take a reference on the
		 * inpcb so it survives, drop both locks, reacquire in the
		 * required order (pcbinfo write lock first, then inpcb),
		 * and re-validate: the connection may have been freed or
		 * dropped while unlocked.
		 */
		in_pcbref(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);
		INP_WUNLOCK(inp);
		INP_INFO_WLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		if (in_pcbrele_wlocked(inp)) {
			/* We held the last reference; inp is gone. */
			INP_INFO_WUNLOCK(&V_tcbinfo);
			CURVNET_RESTORE();
			return;
		}
		if (inp->inp_flags & INP_DROPPED) {
			INP_WUNLOCK(inp);
			INP_INFO_WUNLOCK(&V_tcbinfo);
			CURVNET_RESTORE();
			return;
		}

		tp = tcp_drop(tp, tp->t_softerror ?
			      tp->t_softerror : ETIMEDOUT);
		headlocked = 1;
		goto out;
	}
	/* Retransmit path needs no pcbinfo lock; release it early. */
	INP_INFO_RUNLOCK(&V_tcbinfo);
	headlocked = 0;
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * first retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	/* Compute the backed-off RTO, clamped to [t_rttmin, TCPTV_REXMTMAX]. */
	if (tp->t_state == TCPS_SYN_SENT)
		rexmt = TCPTV_RTOBASE * tcp_syn_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
		      tp->t_rttmin, TCPTV_REXMTMAX);
	/*
	 * Disable rfc1323 if we haven't got any response to
	 * our third SYN to work-around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, our srtt estimate is probably bogus.
	 * Clobber it so we'll take the next rtt measurement as our srtt;
	 * move the current srtt into rttvar to keep the current
	 * retransmit times until then.
	 */
	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#ifdef INET6
		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
			in6_losing(tp->t_inpcb);
#endif
		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
		tp->t_srtt = 0;
	}
	/* Rewind to the oldest unacknowledged data for retransmission. */
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	/* Notify congestion control of the RTO before retransmitting. */
	cc_cong_signal(tp, NULL, CC_RTO);

	(void) tcp_output(tp);

out:
#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
			  PRU_SLOWTIMO);
#endif
	/* tcp_drop() returning NULL means the inpcb lock is already gone. */
	if (tp != NULL)
		INP_WUNLOCK(inp);
	if (headlocked)
		INP_INFO_WUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
}
630
631void
632tcp_timer_activate(struct tcpcb *tp, int timer_type, u_int delta)
633{
634	struct callout *t_callout;
635	void *f_callout;
636	struct inpcb *inp = tp->t_inpcb;
637	int cpu = INP_CPU(inp);
638
639#ifdef TCP_OFFLOAD
640	if (tp->t_flags & TF_TOE)
641		return;
642#endif
643
644	switch (timer_type) {
645		case TT_DELACK:
646			t_callout = &tp->t_timers->tt_delack;
647			f_callout = tcp_timer_delack;
648			break;
649		case TT_REXMT:
650			t_callout = &tp->t_timers->tt_rexmt;
651			f_callout = tcp_timer_rexmt;
652			break;
653		case TT_PERSIST:
654			t_callout = &tp->t_timers->tt_persist;
655			f_callout = tcp_timer_persist;
656			break;
657		case TT_KEEP:
658			t_callout = &tp->t_timers->tt_keep;
659			f_callout = tcp_timer_keep;
660			break;
661		case TT_2MSL:
662			t_callout = &tp->t_timers->tt_2msl;
663			f_callout = tcp_timer_2msl;
664			break;
665		default:
666			panic("bad timer_type");
667		}
668	if (delta == 0) {
669		callout_stop(t_callout);
670	} else {
671		callout_reset_on(t_callout, delta, f_callout, tp, cpu);
672	}
673}
674
675int
676tcp_timer_active(struct tcpcb *tp, int timer_type)
677{
678	struct callout *t_callout;
679
680	switch (timer_type) {
681		case TT_DELACK:
682			t_callout = &tp->t_timers->tt_delack;
683			break;
684		case TT_REXMT:
685			t_callout = &tp->t_timers->tt_rexmt;
686			break;
687		case TT_PERSIST:
688			t_callout = &tp->t_timers->tt_persist;
689			break;
690		case TT_KEEP:
691			t_callout = &tp->t_timers->tt_keep;
692			break;
693		case TT_2MSL:
694			t_callout = &tp->t_timers->tt_2msl;
695			break;
696		default:
697			panic("bad timer_type");
698		}
699	return callout_active(t_callout);
700}
701
702#define	ticks_to_msecs(t)	(1000*(t) / hz)
703
704void
705tcp_timer_to_xtimer(struct tcpcb *tp, struct tcp_timer *timer, struct xtcp_timer *xtimer)
706{
707	bzero(xtimer, sizeof(struct xtcp_timer));
708	if (timer == NULL)
709		return;
710	if (callout_active(&timer->tt_delack))
711		xtimer->tt_delack = ticks_to_msecs(timer->tt_delack.c_time - ticks);
712	if (callout_active(&timer->tt_rexmt))
713		xtimer->tt_rexmt = ticks_to_msecs(timer->tt_rexmt.c_time - ticks);
714	if (callout_active(&timer->tt_persist))
715		xtimer->tt_persist = ticks_to_msecs(timer->tt_persist.c_time - ticks);
716	if (callout_active(&timer->tt_keep))
717		xtimer->tt_keep = ticks_to_msecs(timer->tt_keep.c_time - ticks);
718	if (callout_active(&timer->tt_2msl))
719		xtimer->tt_2msl = ticks_to_msecs(timer->tt_2msl.c_time - ticks);
720	xtimer->t_rcvtime = ticks_to_msecs(ticks - tp->t_rcvtime);
721}
722