tcp_timer.c: revision 50477 (deleted lines) vs. revision 50673 (added lines)
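What the new revision does, in brief: the global tcp_fasttimo()/tcp_slowtimo() passes that decremented each connection's t_timer[] array every 200/500 ms are replaced by one kernel callout per timer (tt_delack, tt_2msl, tt_keep, tt_persist, tt_rexmt) hanging off the tcpcb, each dispatched to its own handler below. Idle time is now measured as (ticks - tp->t_rcvtime) instead of the per-tick t_idle counter, tcp_slowtimo() shrinks to bookkeeping (recomputing tcp_maxidle and bumping tcp_iss), two sysctls (tcp_delacktime, tcp_msl) are added, and tcp_timer_rexmt() gains the Allman/Paxson "bad retransmit" bookkeeping. A minimal sketch of the change in arming style, using the keepalive timer as the example; both forms appear verbatim further down in this diff:

	/* r50477: re-arm by loading a slot in tp->t_timer[], which
	 * tcp_slowtimo() decrements and checks every 500 ms. */
	tp->t_timer[TCPT_KEEP] = tcp_keepintvl;

	/* r50673: re-arm a per-connection callout, in ticks, which fires
	 * straight into its handler; callout_stop() cancels it. */
	callout_reset(tp->tt_keep, tcp_keepintvl, tcp_timer_keep, tp);
	callout_stop(tp->tt_keep);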
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 17 unchanged lines hidden ---

26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
34 * $FreeBSD: head/sys/netinet/tcp_timer.c 50477 1999-08-28 01:08:13Z peter $
34 * $FreeBSD: head/sys/netinet/tcp_timer.c 50673 1999-08-30 21:17:07Z jlemon $
35 */
36
37#include "opt_compat.h"
38#include "opt_tcpdebug.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>

--- 15 unchanged lines hidden ---

58#include <netinet/tcp_seq.h>
59#include <netinet/tcp_timer.h>
60#include <netinet/tcp_var.h>
61#include <netinet/tcpip.h>
62#ifdef TCPDEBUG
63#include <netinet/tcp_debug.h>
64#endif
65
66int tcp_keepinit = TCPTV_KEEP_INIT;
66int tcp_keepinit;
67SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
68 CTLFLAG_RW, &tcp_keepinit , 0, "");
69
70int tcp_keepidle = TCPTV_KEEP_IDLE;
70int tcp_keepidle;
71SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
72 CTLFLAG_RW, &tcp_keepidle , 0, "");
73
74static int tcp_keepintvl = TCPTV_KEEPINTVL;
74int tcp_keepintvl;
75SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
76 CTLFLAG_RW, &tcp_keepintvl , 0, "");
77
78int tcp_delacktime;
79SYSCTL_INT(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, CTLFLAG_RW,
80 &tcp_delacktime, 0, "Time before a delayed ACK is sent");
81
82int tcp_msl;
83SYSCTL_INT(_net_inet_tcp, OID_AUTO, msl, CTLFLAG_RW,
84 &tcp_msl, 0, "Maximum segment lifetime");
85
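	/*
	 * Note (editorial, not in either revision): the TCPTV_* compile-time
	 * initializers are dropped from tcp_keepinit, tcp_keepidle and
	 * tcp_keepintvl (and from tcp_maxpersistidle below), and the new
	 * tcp_delacktime and tcp_msl start out uninitialized as well;
	 * presumably these values are now expressed in hz ticks and are
	 * assigned at run time outside this file, since hz is not a
	 * compile-time constant.
	 */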
78static int always_keepalive = 0;
79SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
80 &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");
81
82static int tcp_keepcnt = TCPTV_KEEPCNT;
83 /* max idle probes */
86static int always_keepalive = 0;
87SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
88 &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");
89
90static int tcp_keepcnt = TCPTV_KEEPCNT;
91 /* max idle probes */
84static int tcp_maxpersistidle = TCPTV_KEEP_IDLE;
92int tcp_maxpersistidle;
85 /* max idle time in persist */
86int tcp_maxidle;
87
88/*
93 /* max idle time in persist */
94int tcp_maxidle;
95
96/*
89 * Fast timeout routine for processing delayed acks
90 */
91void
92tcp_fasttimo()
93{
94 register struct inpcb *inp;
95 register struct tcpcb *tp;
96 int s;
97
98 if (tcp_delack_enabled) {
99 s = splnet();
100 for (inp = tcb.lh_first; inp != NULL; inp = inp->inp_list.le_next) {
101 if ((tp = (struct tcpcb *)inp->inp_ppcb) &&
102 (tp->t_flags & TF_DELACK)) {
103 tp->t_flags &= ~TF_DELACK;
104 tp->t_flags |= TF_ACKNOW;
105 tcpstat.tcps_delack++;
106 (void) tcp_output(tp);
107 }
108 }
109 splx(s);
110 }
111}
112
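	/*
	 * Note (editorial, not in either revision): the 200 ms fast-timeout
	 * sweep above is simply deleted; delayed ACKs are now driven by the
	 * per-connection tt_delack callout, handled by tcp_timer_delack()
	 * later in this file.  The callout is presumably armed from the
	 * input path outside this file, along the lines of
	 * callout_reset(tp->tt_delack, tcp_delacktime, tcp_timer_delack, tp).
	 */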
113/*
114 * Tcp protocol timeout routine called every 500 ms.
97 * Tcp protocol timeout routine called every 500 ms.
115 * Updates the timers in all active tcb's and
98 * Updates timestamps used for TCP
116 * causes finite state machine actions if timers expire.
117 */
118void
119tcp_slowtimo()
120{
99 * causes finite state machine actions if timers expire.
100 */
101void
102tcp_slowtimo()
103{
121 register struct inpcb *ip, *ipnxt;
122 register struct tcpcb *tp;
123 register int i;
124 int s;
104 int s;
125#ifdef TCPDEBUG
126 int ostate;
127#endif
128
129 s = splnet();
130
131 tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
132
105
106 s = splnet();
107
108 tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
109
133 ip = tcb.lh_first;
134 if (ip == NULL) {
135 splx(s);
136 return;
137 }
138 /*
139 * Search through tcb's and update active timers.
140 */
141 for (; ip != NULL; ip = ipnxt) {
142 ipnxt = ip->inp_list.le_next;
143 tp = intotcpcb(ip);
144 if (tp == 0 || tp->t_state == TCPS_LISTEN)
145 continue;
146 for (i = 0; i < TCPT_NTIMERS; i++) {
147 if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
148#ifdef TCPDEBUG
149 ostate = tp->t_state;
150#endif
151 tp = tcp_timers(tp, i);
152 if (tp == NULL)
153 goto tpgone;
154#ifdef TCPDEBUG
155 if (tp->t_inpcb->inp_socket->so_options
156 & SO_DEBUG)
157 tcp_trace(TA_USER, ostate, tp,
158 (struct tcpiphdr *)0,
159 PRU_SLOWTIMO);
160#endif
161 }
162 }
163 tp->t_idle++;
164 tp->t_duration++;
165 if (tp->t_rtt)
166 tp->t_rtt++;
167tpgone:
168 ;
169 }
170 tcp_iss += TCP_ISSINCR/PR_SLOWHZ; /* increment iss */
171#ifdef TCP_COMPAT_42
172 if ((int)tcp_iss < 0)
173 tcp_iss = TCP_ISSINCR; /* XXX */
174#endif
110 tcp_iss += TCP_ISSINCR/PR_SLOWHZ; /* increment iss */
111#ifdef TCP_COMPAT_42
112 if ((int)tcp_iss < 0)
113 tcp_iss = TCP_ISSINCR; /* XXX */
114#endif
175 tcp_now++; /* for timestamps */
176 splx(s);
177}
178
179/*
180 * Cancel all timers for TCP tp.
181 */
182void
183tcp_canceltimers(tp)
184 struct tcpcb *tp;
185{
115 splx(s);
116}
117
118/*
119 * Cancel all timers for TCP tp.
120 */
121void
122tcp_canceltimers(tp)
123 struct tcpcb *tp;
124{
186 register int i;
187
188 for (i = 0; i < TCPT_NTIMERS; i++)
189 tp->t_timer[i] = 0;
125 callout_stop(tp->tt_2msl);
126 callout_stop(tp->tt_persist);
127 callout_stop(tp->tt_keep);
128 callout_stop(tp->tt_rexmt);
190}
191
192int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
193 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
194
195static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */
196
197/*
198 * TCP timer processing.
199 */
129}
130
131int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
132 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
133
134static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */
135
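The comment two lines up states that tcp_totbackoff is the sum of tcp_backoff[], and the arithmetic agrees: 1+2+4+8+16+32 = 63, plus seven 64s = 448, for a total of 511. A throwaway user-space check of that sum, using a local copy of the table rather than the kernel symbols:

	#include <stdio.h>

	int
	main(void)
	{
		/* Local copy of tcp_backoff[]; TCP_MAXRXTSHIFT + 1 == 13 entries. */
		int backoff[] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
		int i, n, sum;

		n = sizeof(backoff) / sizeof(backoff[0]);
		for (i = 0, sum = 0; i < n; i++)
			sum += backoff[i];
		printf("sum of tcp_backoff[] = %d\n", sum);	/* prints 511 */
		return (0);
	}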
136/*
137 * TCP timer processing.
138 */
200struct tcpcb *
201tcp_timers(tp, timer)
202 register struct tcpcb *tp;
203 int timer;
139void
140tcp_timer_delack(xtp)
141 void *xtp;
204{
142{
205 register int rexmt;
143 struct tcpcb *tp = xtp;
144 int s;
206
145
207 switch (timer) {
146 s = splnet();
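	/*
	 * Note (editorial, not in the source): every new handler opens with
	 * this guard.  If the callout is pending again, it has been
	 * rescheduled since this invocation was queued, so the stale call
	 * just restores the spl and returns; otherwise callout_deactivate()
	 * clears the active flag and the timer action proceeds.
	 */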
147 if (callout_pending(tp->tt_delack)) {
148 splx(s);
149 return;
150 }
151 callout_deactivate(tp->tt_delack);
208
152
153 tp->t_flags |= TF_ACKNOW;
154 tcpstat.tcps_delack++;
155 (void) tcp_output(tp);
156 splx(s);
157}
158
159void
160tcp_timer_2msl(xtp)
161 void *xtp;
162{
163 struct tcpcb *tp = xtp;
164 int s;
165#ifdef TCPDEBUG
166 int ostate;
167
168 ostate = tp->t_state;
169#endif
170 s = splnet();
171 if (callout_pending(tp->tt_2msl)) {
172 splx(s);
173 return;
174 }
175 callout_deactivate(tp->tt_2msl);
209 /*
210 * 2 MSL timeout in shutdown went off. If we're closed but
211 * still waiting for peer to close and connection has been idle
212 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
213 * control block. Otherwise, check again in a bit.
214 */
176 /*
177 * 2 MSL timeout in shutdown went off. If we're closed but
178 * still waiting for peer to close and connection has been idle
179 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
180 * control block. Otherwise, check again in a bit.
181 */
215 case TCPT_2MSL:
216 if (tp->t_state != TCPS_TIME_WAIT &&
217 tp->t_idle <= tcp_maxidle)
218 tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
219 else
220 tp = tcp_close(tp);
221 break;
182 if (tp->t_state != TCPS_TIME_WAIT &&
183 (ticks - tp->t_rcvtime) <= tcp_maxidle)
184 callout_reset(tp->tt_2msl, tcp_keepintvl,
185 tcp_timer_2msl, tp);
186 else
187 tp = tcp_close(tp);
222
188
189#ifdef TCPDEBUG
190 if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
191 tcp_trace(TA_USER, ostate, tp, (struct tcpiphdr *)0,
192 PRU_SLOWTIMO);
193#endif
194 splx(s);
195}
196
197void
198tcp_timer_keep(xtp)
199 void *xtp;
200{
201 struct tcpcb *tp = xtp;
202 int s;
203#ifdef TCPDEBUG
204 int ostate;
205
206 ostate = tp->t_state;
207#endif
208 s = splnet();
209 if (callout_pending(tp->tt_keep)) {
210 splx(s);
211 return;
212 }
213 callout_deactivate(tp->tt_keep);
223 /*
214 /*
224 * Retransmission timer went off. Message has not
225 * been acked within retransmit interval. Back off
226 * to a longer retransmit interval and retransmit one segment.
215 * Keep-alive timer went off; send something
216 * or drop connection if idle for too long.
227 */
217 */
228 case TCPT_REXMT:
229 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
230 tp->t_rxtshift = TCP_MAXRXTSHIFT;
231 tcpstat.tcps_timeoutdrop++;
232 tp = tcp_drop(tp, tp->t_softerror ?
233 tp->t_softerror : ETIMEDOUT);
234 break;
235 }
236 tcpstat.tcps_rexmttimeo++;
237 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
238 TCPT_RANGESET(tp->t_rxtcur, rexmt,
239 tp->t_rttmin, TCPTV_REXMTMAX);
240 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
218 tcpstat.tcps_keeptimeo++;
219 if (tp->t_state < TCPS_ESTABLISHED)
220 goto dropit;
221 if ((always_keepalive ||
222 tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
223 tp->t_state <= TCPS_CLOSING) {
224 if ((ticks - tp->t_rcvtime) >= tcp_keepidle + tcp_maxidle)
225 goto dropit;
241 /*
226 /*
242 * If losing, let the lower level know and try for
243 * a better route. Also, if we backed off this far,
244 * our srtt estimate is probably bogus. Clobber it
245 * so we'll take the next rtt measurement as our srtt;
246 * move the current srtt into rttvar to keep the current
247 * retransmit times until then.
227 * Send a packet designed to force a response
228 * if the peer is up and reachable:
229 * either an ACK if the connection is still alive,
230 * or an RST if the peer has closed the connection
231 * due to timeout or reboot.
232 * Using sequence number tp->snd_una-1
233 * causes the transmitted zero-length segment
234 * to lie outside the receive window;
235 * by the protocol spec, this requires the
236 * correspondent TCP to respond.
248 */
237 */
249 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
250 in_losing(tp->t_inpcb);
251 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
252 tp->t_srtt = 0;
253 }
254 tp->snd_nxt = tp->snd_una;
238 tcpstat.tcps_keepprobe++;
239#ifdef TCP_COMPAT_42
255 /*
240 /*
256 * Force a segment to be sent.
241 * The keepalive packet must have nonzero length
242 * to get a 4.2 host to respond.
257 */
243 */
258 tp->t_flags |= TF_ACKNOW;
259 /*
260 * If timing a segment in this window, stop the timer.
261 */
262 tp->t_rtt = 0;
263 /*
264 * Close the congestion window down to one segment
265 * (we'll open it by one segment for each ack we get).
266 * Since we probably have a window's worth of unacked
267 * data accumulated, this "slow start" keeps us from
268 * dumping all that data as back-to-back packets (which
269 * might overwhelm an intermediate gateway).
270 *
271 * There are two phases to the opening: Initially we
272 * open by one mss on each ack. This makes the window
273 * size increase exponentially with time. If the
274 * window is larger than the path can handle, this
275 * exponential growth results in dropped packet(s)
276 * almost immediately. To get more time between
277 * drops but still "push" the network to take advantage
278 * of improving conditions, we switch from exponential
279 * to linear window opening at some threshhold size.
280 * For a threshhold, we use half the current window
281 * size, truncated to a multiple of the mss.
282 *
283 * (the minimum cwnd that will give us exponential
284 * growth is 2 mss. We don't allow the threshhold
285 * to go below this.)
286 */
287 {
288 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
289 if (win < 2)
290 win = 2;
291 tp->snd_cwnd = tp->t_maxseg;
292 tp->snd_ssthresh = win * tp->t_maxseg;
293 tp->t_dupacks = 0;
294 }
295 (void) tcp_output(tp);
296 break;
244 tcp_respond(tp, tp->t_template, (struct mbuf *)NULL,
245 tp->rcv_nxt - 1, tp->snd_una - 1, 0);
246#else
247 tcp_respond(tp, tp->t_template, (struct mbuf *)NULL,
248 tp->rcv_nxt, tp->snd_una - 1, 0);
249#endif
250 callout_reset(tp->tt_keep, tcp_keepintvl, tcp_timer_keep, tp);
251 } else
252 callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
297
253
254#ifdef TCPDEBUG
255 if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
256 tcp_trace(TA_USER, ostate, tp, (struct tcpiphdr *)0,
257 PRU_SLOWTIMO);
258#endif
259 splx(s);
260 return;
261
262dropit:
263 tcpstat.tcps_keepdrops++;
264 tp = tcp_drop(tp, ETIMEDOUT);
265
266#ifdef TCPDEBUG
267 if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
268 tcp_trace(TA_USER, ostate, tp, (struct tcpiphdr *)0,
269 PRU_SLOWTIMO);
270#endif
271 splx(s);
272}
273
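To put tcp_timer_keep()'s idle cutoffs in human terms, assume the traditional BSD keepalive defaults of 2 hours before the first probe, 75 seconds between probes and 8 probes; the initializers are not visible in this diff, so treat those numbers as assumptions. The first probe then goes out after 2 hours of silence and the connection is dropped roughly 10 minutes later. A quick user-space check of the arithmetic:

	#include <stdio.h>

	int
	main(void)
	{
		/* Assumed traditional defaults, in seconds (not taken from this diff). */
		int keepidle = 2 * 60 * 60;		/* tcp_keepidle */
		int keepintvl = 75;			/* tcp_keepintvl */
		int keepcnt = 8;			/* tcp_keepcnt */
		int maxidle = keepcnt * keepintvl;	/* as in tcp_slowtimo() */

		printf("first keepalive probe after %d s idle\n", keepidle);
		printf("dropped after %d s idle (keepidle + maxidle)\n",
		    keepidle + maxidle);
		return (0);
	}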
274void
275tcp_timer_persist(xtp)
276 void *xtp;
277{
278 struct tcpcb *tp = xtp;
279 int s;
280#ifdef TCPDEBUG
281 int ostate;
282
283 ostate = tp->t_state;
284#endif
285 s = splnet();
286 if (callout_pending(tp->tt_persist)) {
287 splx(s);
288 return;
289 }
290 callout_deactivate(tp->tt_persist);
298 /*
299 * Persistance timer into zero window.
300 * Force a byte to be output, if possible.
301 */
291 /*
292 * Persistance timer into zero window.
293 * Force a byte to be output, if possible.
294 */
302 case TCPT_PERSIST:
303 tcpstat.tcps_persisttimeo++;
295 tcpstat.tcps_persisttimeo++;
296 /*
297 * Hack: if the peer is dead/unreachable, we do not
298 * time out if the window is closed. After a full
299 * backoff, drop the connection if the idle time
300 * (no responses to probes) reaches the maximum
301 * backoff that we would use if retransmitting.
302 */
303 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
304 ((ticks - tp->t_rcvtime) >= tcp_maxpersistidle ||
305 (ticks - tp->t_rcvtime) >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
306 tcpstat.tcps_persistdrop++;
307 tp = tcp_drop(tp, ETIMEDOUT);
308 goto out;
309 }
310 tcp_setpersist(tp);
311 tp->t_force = 1;
312 (void) tcp_output(tp);
313 tp->t_force = 0;
314
315out:
316#ifdef TCPDEBUG
317 if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
318 tcp_trace(TA_USER, ostate, tp, (struct tcpiphdr *)0,
319 PRU_SLOWTIMO);
320#endif
321 splx(s);
322}
323
324void
325tcp_timer_rexmt(xtp)
326 void *xtp;
327{
328 struct tcpcb *tp = xtp;
329 int s;
330 int rexmt;
331#ifdef TCPDEBUG
332 int ostate;
333
334 ostate = tp->t_state;
335#endif
336 s = splnet();
337 if (callout_pending(tp->tt_rexmt)) {
338 splx(s);
339 return;
340 }
341 callout_deactivate(tp->tt_rexmt);
342 /*
343 * Retransmission timer went off. Message has not
344 * been acked within retransmit interval. Back off
345 * to a longer retransmit interval and retransmit one segment.
346 */
347 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
348 tp->t_rxtshift = TCP_MAXRXTSHIFT;
349 tcpstat.tcps_timeoutdrop++;
350 tp = tcp_drop(tp, tp->t_softerror ?
351 tp->t_softerror : ETIMEDOUT);
352 goto out;
353 }
354 if (tp->t_rxtshift == 1) {
304 /*
355 /*
305 * Hack: if the peer is dead/unreachable, we do not
306 * time out if the window is closed. After a full
307 * backoff, drop the connection if the idle time
308 * (no responses to probes) reaches the maximum
309 * backoff that we would use if retransmitting.
356 * first retransmit; record ssthresh and cwnd so they can
357 * be recovered if this turns out to be a "bad" retransmit.
358 * A retransmit is considered "bad" if an ACK for this
359 * segment is received within RTT/2 interval; the assumption
360 * here is that the ACK was already in flight. See
361 * "On Estimating End-to-End Network Path Properties" by
362 * Allman and Paxson for more details.
310 */
363 */
311 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
312 (tp->t_idle >= tcp_maxpersistidle ||
313 tp->t_idle >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
314 tcpstat.tcps_persistdrop++;
315 tp = tcp_drop(tp, ETIMEDOUT);
316 break;
317 }
318 tcp_setpersist(tp);
319 tp->t_force = 1;
320 (void) tcp_output(tp);
321 tp->t_force = 0;
322 break;
323
364 tp->snd_cwnd_prev = tp->snd_cwnd;
365 tp->snd_ssthresh_prev = tp->snd_ssthresh;
366 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
367 }
368 tcpstat.tcps_rexmttimeo++;
369 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
370 TCPT_RANGESET(tp->t_rxtcur, rexmt,
371 tp->t_rttmin, TCPTV_REXMTMAX);
324 /*
372 /*
325 * Keep-alive timer went off; send something
326 * or drop connection if idle for too long.
373 * If losing, let the lower level know and try for
374 * a better route. Also, if we backed off this far,
375 * our srtt estimate is probably bogus. Clobber it
376 * so we'll take the next rtt measurement as our srtt;
377 * move the current srtt into rttvar to keep the current
378 * retransmit times until then.
327 */
379 */
328 case TCPT_KEEP:
329 tcpstat.tcps_keeptimeo++;
330 if (tp->t_state < TCPS_ESTABLISHED)
331 goto dropit;
332 if ((always_keepalive ||
333 tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
334 tp->t_state <= TCPS_CLOSING) {
335 if (tp->t_idle >= tcp_keepidle + tcp_maxidle)
336 goto dropit;
337 /*
338 * Send a packet designed to force a response
339 * if the peer is up and reachable:
340 * either an ACK if the connection is still alive,
341 * or an RST if the peer has closed the connection
342 * due to timeout or reboot.
343 * Using sequence number tp->snd_una-1
344 * causes the transmitted zero-length segment
345 * to lie outside the receive window;
346 * by the protocol spec, this requires the
347 * correspondent TCP to respond.
348 */
349 tcpstat.tcps_keepprobe++;
350#ifdef TCP_COMPAT_42
351 /*
352 * The keepalive packet must have nonzero length
353 * to get a 4.2 host to respond.
354 */
355 tcp_respond(tp, tp->t_template, (struct mbuf *)NULL,
356 tp->rcv_nxt - 1, tp->snd_una - 1, 0);
357#else
358 tcp_respond(tp, tp->t_template, (struct mbuf *)NULL,
359 tp->rcv_nxt, tp->snd_una - 1, 0);
360#endif
361 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
362 } else
363 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
364 break;
365 dropit:
366 tcpstat.tcps_keepdrops++;
367 tp = tcp_drop(tp, ETIMEDOUT);
368 break;
380 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
381 in_losing(tp->t_inpcb);
382 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
383 tp->t_srtt = 0;
369 }
384 }
370 return (tp);
385 tp->snd_nxt = tp->snd_una;
386 /*
387 * Force a segment to be sent.
388 */
389 tp->t_flags |= TF_ACKNOW;
390 /*
391 * If timing a segment in this window, stop the timer.
392 */
393 tp->t_rtttime = 0;
394 /*
395 * Close the congestion window down to one segment
396 * (we'll open it by one segment for each ack we get).
397 * Since we probably have a window's worth of unacked
398 * data accumulated, this "slow start" keeps us from
399 * dumping all that data as back-to-back packets (which
400 * might overwhelm an intermediate gateway).
401 *
402 * There are two phases to the opening: Initially we
403 * open by one mss on each ack. This makes the window
404 * size increase exponentially with time. If the
405 * window is larger than the path can handle, this
406 * exponential growth results in dropped packet(s)
407 * almost immediately. To get more time between
408 * drops but still "push" the network to take advantage
409 * of improving conditions, we switch from exponential
410 * to linear window opening at some threshhold size.
411 * For a threshhold, we use half the current window
412 * size, truncated to a multiple of the mss.
413 *
414 * (the minimum cwnd that will give us exponential
415 * growth is 2 mss. We don't allow the threshhold
416 * to go below this.)
417 */
418 {
419 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
420 if (win < 2)
421 win = 2;
422 tp->snd_cwnd = tp->t_maxseg;
423 tp->snd_ssthresh = win * tp->t_maxseg;
424 tp->t_dupacks = 0;
425 }
426 (void) tcp_output(tp);
427
428out:
429#ifdef TCPDEBUG
430 if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
431 tcp_trace(TA_USER, ostate, tp, (struct tcpiphdr *)0,
432 PRU_SLOWTIMO);
433#endif
434 splx(s);
371}
435}
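A worked instance of the window-reset block at the end of tcp_timer_rexmt() (identical to its predecessor in the old tcp_timers()), with hypothetical per-connection values of a 32 KB send window, a 16 KB congestion window and a 1460-byte MSS: the threshold becomes five segments and the congestion window restarts at one segment.

	#include <stdio.h>

	int
	main(void)
	{
		/* Hypothetical values; in the kernel these live in the tcpcb. */
		unsigned int snd_wnd = 32768, snd_cwnd = 16384, t_maxseg = 1460;
		unsigned int win;

		/* Same arithmetic as the block at the end of tcp_timer_rexmt(). */
		win = (snd_wnd < snd_cwnd ? snd_wnd : snd_cwnd) / 2 / t_maxseg;
		if (win < 2)
			win = 2;
		printf("new snd_cwnd     = %u (one segment)\n", t_maxseg);
		printf("new snd_ssthresh = %u (%u segments)\n",
		    win * t_maxseg, win);
		return (0);
	}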