1/*
2 *  net/dccp/timer.c
3 *
4 *  An implementation of the DCCP protocol
5 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 *	This program is free software; you can redistribute it and/or
8 *	modify it under the terms of the GNU General Public License
9 *	as published by the Free Software Foundation; either version
10 *	2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/dccp.h>
14#include <linux/skbuff.h>
15
16#include "dccp.h"
17
/*
 * sysctl variables governing numbers of retransmission attempts.
 * Defaults mirror TCP's SYN/retry limits: request_retries bounds
 * client-side Request retransmits, retries1 triggers route re-selection,
 * retries2 is the hard give-up limit for established connections.
 */
int  sysctl_dccp_request_retries	__read_mostly = TCP_SYN_RETRIES;
int  sysctl_dccp_retries1		__read_mostly = TCP_RETR1;
int  sysctl_dccp_retries2		__read_mostly = TCP_RETR2;
22
23static void dccp_write_err(struct sock *sk)
24{
25	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
26	sk->sk_error_report(sk);
27
28	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
29	dccp_done(sk);
30	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
31}
32
33/* A write timeout has occurred. Process the after effects. */
34static int dccp_write_timeout(struct sock *sk)
35{
36	const struct inet_connection_sock *icsk = inet_csk(sk);
37	int retry_until;
38
39	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
40		if (icsk->icsk_retransmits != 0)
41			dst_negative_advice(&sk->sk_dst_cache);
42		retry_until = icsk->icsk_syn_retries ?
43			    : sysctl_dccp_request_retries;
44	} else {
45		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
46			/* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu
47			   black hole detection. :-(
48
49			   It is place to make it. It is not made. I do not want
50			   to make it. It is disguisting. It does not work in any
51			   case. Let me to cite the same draft, which requires for
52			   us to implement this:
53
54   "The one security concern raised by this memo is that ICMP black holes
55   are often caused by over-zealous security administrators who block
56   all ICMP messages.  It is vitally important that those who design and
57   deploy security systems understand the impact of strict filtering on
58   upper-layer protocols.  The safest web site in the world is worthless
59   if most TCP implementations cannot transfer data from it.  It would
60   be far nicer to have all of the black holes fixed rather than fixing
61   all of the TCP implementations."
62
63			   Golden words :-).
64		   */
65
66			dst_negative_advice(&sk->sk_dst_cache);
67		}
68
69		retry_until = sysctl_dccp_retries2;
70	}
71
72	if (icsk->icsk_retransmits >= retry_until) {
73		/* Has it gone just too far? */
74		dccp_write_err(sk);
75		return 1;
76	}
77	return 0;
78}
79
80/*
81 *	The DCCP retransmit timer.
82 */
/*
 *	The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* retransmit timer is used for feature negotiation throughout
	 * connection.  In this case, no packet is re-transmitted, but rather an
	 * ack is generated and pending changes are placed into its options.
	 */
	if (sk->sk_send_head == NULL) {
		dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk);
		if (sk->sk_state == DCCP_OPEN)
			dccp_send_ack(sk);
		/* No skb to retransmit: just back off the timer below. */
		goto backoff;
	}

	/*
	 * sk->sk_send_head has to have one skb with
	 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
	 * packet types. The only packets eligible for retransmission are:
	 *	-- Requests in client-REQUEST  state (sec. 8.1.1)
	 *	-- Acks     in client-PARTOPEN state (sec. 8.1.5)
	 *	-- CloseReq in server-CLOSEREQ state (sec. 8.3)
	 *	-- Close    in   node-CLOSING  state (sec. 8.3)                */
	BUG_TRAP(sk->sk_send_head != NULL);

	/*
	 * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
	 * sent, no need to retransmit, this sock is dead.
	 */
	if (dccp_write_timeout(sk))
		goto out;

	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */
	if (icsk->icsk_retransmits == 0)
		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk, sk->sk_send_head) < 0) {
		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		/* Probe again soon (bounded by current RTO) instead of
		 * doubling the RTO, since the failure was local. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		goto out;
	}

backoff:
	/* Exponential backoff: double RTO (capped) and rearm the timer. */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	/* Past retries1 failures, drop the cached route so the next
	 * transmit performs a fresh route lookup. */
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
out:;
}
147
148static void dccp_write_timer(unsigned long data)
149{
150	struct sock *sk = (struct sock *)data;
151	struct inet_connection_sock *icsk = inet_csk(sk);
152	int event = 0;
153
154	bh_lock_sock(sk);
155	if (sock_owned_by_user(sk)) {
156		/* Try again later */
157		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
158			       jiffies + (HZ / 20));
159		goto out;
160	}
161
162	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
163		goto out;
164
165	if (time_after(icsk->icsk_timeout, jiffies)) {
166		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
167			       icsk->icsk_timeout);
168		goto out;
169	}
170
171	event = icsk->icsk_pending;
172	icsk->icsk_pending = 0;
173
174	switch (event) {
175	case ICSK_TIME_RETRANS:
176		dccp_retransmit_timer(sk);
177		break;
178	}
179out:
180	bh_unlock_sock(sk);
181	sock_put(sk);
182}
183
184/*
185 *	Timer for listening sockets
186 */
/*
 *	Timer for listening sockets
 */
static void dccp_response_timer(struct sock *sk)
{
	/* Prune the accept/request queue of stale connection requests,
	 * retransmitting Responses with the usual TCP-style cadence. */
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
				   DCCP_RTO_MAX);
}
192
193static void dccp_keepalive_timer(unsigned long data)
194{
195	struct sock *sk = (struct sock *)data;
196
197	/* Only process if socket is not in use. */
198	bh_lock_sock(sk);
199	if (sock_owned_by_user(sk)) {
200		/* Try again later. */
201		inet_csk_reset_keepalive_timer(sk, HZ / 20);
202		goto out;
203	}
204
205	if (sk->sk_state == DCCP_LISTEN) {
206		dccp_response_timer(sk);
207		goto out;
208	}
209out:
210	bh_unlock_sock(sk);
211	sock_put(sk);
212}
213
214/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	/* Nothing to do unless a delayed-ACK timer is actually armed. */
	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	/* Fired early: rearm for the remaining interval. */
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
260
261/* Transmit-delay timer: used by the CCIDs to delay actual send time */
262static void dccp_write_xmit_timer(unsigned long data)
263{
264	struct sock *sk = (struct sock *)data;
265	struct dccp_sock *dp = dccp_sk(sk);
266
267	bh_lock_sock(sk);
268	if (sock_owned_by_user(sk))
269		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
270	else
271		dccp_write_xmit(sk, 0);
272	bh_unlock_sock(sk);
273	sock_put(sk);
274}
275
276static void dccp_init_write_xmit_timer(struct sock *sk)
277{
278	struct dccp_sock *dp = dccp_sk(sk);
279
280	init_timer(&dp->dccps_xmit_timer);
281	dp->dccps_xmit_timer.data = (unsigned long)sk;
282	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
283}
284
/*
 * Wire up all DCCP transmit-side timers for a socket: the CCID
 * transmit-delay timer plus the standard inet_connection_sock
 * retransmit, delayed-ACK and keepalive timers.
 */
void dccp_init_xmit_timers(struct sock *sk)
{
	dccp_init_write_xmit_timer(sk);
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}
291