• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/net/dccp/ccids/
1/*
2 *  Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
3 *
4 *  Changes to meet Linux coding standards, and DCCP infrastructure fixes.
5 *
6 *  Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 *
8 *  This program is free software; you can redistribute it and/or modify
9 *  it under the terms of the GNU General Public License as published by
10 *  the Free Software Foundation; either version 2 of the License, or
11 *  (at your option) any later version.
12 *
13 *  This program is distributed in the hope that it will be useful,
14 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 *  GNU General Public License for more details.
17 *
18 *  You should have received a copy of the GNU General Public License
19 *  along with this program; if not, write to the Free Software
20 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23/*
24 * This implementation should follow RFC 4341
25 */
26#include <linux/slab.h>
27#include "../feat.h"
28#include "../ccid.h"
29#include "../dccp.h"
30#include "ccid2.h"
31
32
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static int ccid2_debug;
#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)

/*
 * ccid2_hc_tx_check_sanity  -  Debug-only invariant check of the TX history
 *
 * Walks the circular packet-history list backwards from the head (newest)
 * to the tail (oldest) and verifies that:
 *  - the number of un-acked entries matches the @tx_pipe counter,
 *  - sequence numbers strictly increase from tail to head,
 *  - send timestamps are monotonically non-decreasing,
 *  - the total ring size equals tx_seqbufc * CCID2_SEQBUF_LEN.
 * Compiled out (empty macro below) when CCID2 debugging is disabled.
 */
static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
{
	int len = 0;		/* entries between tail and head */
	int pipe = 0;		/* entries not yet acknowledged */
	struct ccid2_seq *seqp = hc->tx_seqh;

	/* there is data in the chain */
	if (seqp != hc->tx_seqt) {
		seqp = seqp->ccid2s_prev;
		len++;
		if (!seqp->ccid2s_acked)
			pipe++;

		while (seqp != hc->tx_seqt) {
			struct ccid2_seq *prev = seqp->ccid2s_prev;

			len++;
			if (!prev->ccid2s_acked)
				pipe++;

			/* packets are sent sequentially */
			BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
						prev->ccid2s_seq ) >= 0);
			BUG_ON(time_before(seqp->ccid2s_sent,
					   prev->ccid2s_sent));

			seqp = prev;
		}
	}

	/* in-flight count kept by the sender must agree with the history */
	BUG_ON(pipe != hc->tx_pipe);
	ccid2_pr_debug("len of chain=%d\n", len);

	/* continue around the ring to count the unused part as well */
	do {
		seqp = seqp->ccid2s_prev;
		len++;
	} while (seqp != hc->tx_seqh);

	ccid2_pr_debug("total len=%d\n", len);
	BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
}
#else
#define ccid2_pr_debug(format, a...)
#define ccid2_hc_tx_check_sanity(hc)
#endif
82
83static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
84{
85	struct ccid2_seq *seqp;
86	int i;
87
88	/* check if we have space to preserve the pointer to the buffer */
89	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
90			       sizeof(struct ccid2_seq *)))
91		return -ENOMEM;
92
93	/* allocate buffer and initialize linked list */
94	seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any());
95	if (seqp == NULL)
96		return -ENOMEM;
97
98	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
99		seqp[i].ccid2s_next = &seqp[i + 1];
100		seqp[i + 1].ccid2s_prev = &seqp[i];
101	}
102	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
103	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
104
105	/* This is the first allocation.  Initiate the head and tail.  */
106	if (hc->tx_seqbufc == 0)
107		hc->tx_seqh = hc->tx_seqt = seqp;
108	else {
109		/* link the existing list with the one we just created */
110		hc->tx_seqh->ccid2s_next = seqp;
111		seqp->ccid2s_prev = hc->tx_seqh;
112
113		hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
114		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
115	}
116
117	/* store the original pointer to the buffer so we can free it */
118	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
119	hc->tx_seqbufc++;
120
121	return 0;
122}
123
124static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
125{
126	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
127
128	if (hc->tx_pipe < hc->tx_cwnd)
129		return 0;
130
131	return 1;
132}
133
134static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
135{
136	struct dccp_sock *dp = dccp_sk(sk);
137	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
138
139	/*
140	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
141	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
142	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
143	 * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
144	 */
145	if (val == 0 || val > max_ratio) {
146		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
147		val = max_ratio;
148	}
149	if (val > DCCPF_ACK_RATIO_MAX)
150		val = DCCPF_ACK_RATIO_MAX;
151
152	if (val == dp->dccps_l_ack_ratio)
153		return;
154
155	ccid2_pr_debug("changing local ack ratio to %u\n", val);
156	dp->dccps_l_ack_ratio = val;
157}
158
/* Record a new smoothed-RTT estimate (in jiffies; -1 means "unset"). */
static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
{
	ccid2_pr_debug("change SRTT to %ld\n", val);
	hc->tx_srtt = val;
}
164
165static void ccid2_start_rto_timer(struct sock *sk);
166
/*
 * ccid2_hc_tx_rto_expire  -  Retransmission-timeout handler
 * @data: the struct sock this timer belongs to (timer-callback convention)
 *
 * Runs in (soft)irq context.  Doubles the RTO (capped at 60s), re-arms the
 * timer, then resets the congestion state analogously to a TCP RTO: cwnd
 * back to 1, ssthresh to max(cwnd/2, 2), pipe cleared and the packet
 * history emptied.  If the socket is currently locked by user context the
 * work is deferred by re-arming the timer for a short interval.
 */
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	long s;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* socket busy in process context — retry in HZ/5 jiffies */
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	ccid2_hc_tx_check_sanity(hc);

	/* back-off timer */
	hc->tx_rto <<= 1;

	/* cap the backed-off timeout at 60 seconds */
	s = hc->tx_rto / HZ;
	if (s > 60)
		hc->tx_rto = 60 * HZ;

	ccid2_start_rto_timer(sk);

	/* adjust pipe, cwnd etc */
	hc->tx_ssthresh = hc->tx_cwnd / 2;
	if (hc->tx_ssthresh < 2)
		hc->tx_ssthresh = 2;
	hc->tx_cwnd	 = 1;
	hc->tx_pipe	 = 0;

	/* clear state about stuff we sent */
	hc->tx_seqt = hc->tx_seqh;
	hc->tx_packets_acked = 0;

	/* clear ack ratio state. */
	hc->tx_rpseq    = 0;
	hc->tx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);
	ccid2_hc_tx_check_sanity(hc);
out:
	bh_unlock_sock(sk);
	sock_put(sk);	/* balances the hold taken when the timer was armed */
}
212
/*
 * Arm the retransmission timer for tx_rto jiffies from now.
 * Caller must guarantee the timer is not already pending.
 */
static void ccid2_start_rto_timer(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);

	BUG_ON(timer_pending(&hc->tx_rtotimer));
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
}
222
/*
 * ccid2_hc_tx_packet_sent  -  Record a freshly transmitted data packet
 * @sk:   the transmitting socket
 * @more: CCID interface argument, unused by CCID-2
 * @len:  payload length, unused by CCID-2
 *
 * Appends an entry (seqno, unacked, timestamp) to the packet history at
 * the head of the ring, growing the ring when head would catch up with
 * tail, bumps the in-flight counter and makes sure the RTO timer runs.
 */
static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	struct ccid2_seq *next;

	hc->tx_pipe++;

	/* record the packet just handed to the link layer */
	hc->tx_seqh->ccid2s_seq   = dp->dccps_gss;
	hc->tx_seqh->ccid2s_acked = 0;
	hc->tx_seqh->ccid2s_sent  = jiffies;

	next = hc->tx_seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hc->tx_seqt) {
		if (ccid2_hc_tx_alloc_seq(hc)) {
			/* history entry is lost; tracking degrades but
			 * the connection keeps running */
			DCCP_CRIT("packet history - out of memory!");
			return;
		}
		next = hc->tx_seqh->ccid2s_next;
		BUG_ON(next == hc->tx_seqt);
	}
	hc->tx_seqh = next;

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);


	/* setup RTO timer */
	if (!timer_pending(&hc->tx_rtotimer))
		ccid2_start_rto_timer(sk);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	/* dump the outstanding-packet history from oldest to newest */
	do {
		struct ccid2_seq *seqp = hc->tx_seqt;

		while (seqp != hc->tx_seqh) {
			ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
				       (unsigned long long)seqp->ccid2s_seq,
				       seqp->ccid2s_acked, seqp->ccid2s_sent);
			seqp = seqp->ccid2s_next;
		}
	} while (0);
	ccid2_pr_debug("=========\n");
	ccid2_hc_tx_check_sanity(hc);
#endif
}
269
/*
 * ccid2_ackvector  -  Locate the next Ack Vector option in a DCCP header
 * @sk:     socket (unused except for context)
 * @skb:    packet whose options are scanned
 * @offset: byte offset into the options area at which to resume scanning
 * @vec:    out: start of the Ack Vector's value bytes
 * @veclen: out: number of value bytes
 *
 * Walks the option area (already validated by earlier option parsing).
 * Returns the offset just past the found Ack Vector, so repeated calls
 * iterate over multiple Ack Vector options; returns -1 when none is left
 * or when an inconsistency is detected (which would indicate a bug).
 */
static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
			   unsigned char **vec, unsigned char *veclen)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
	unsigned char *opt_ptr;
	const unsigned char *opt_end = (unsigned char *)dh +
					(dh->dccph_doff * 4);
	unsigned char opt, len;
	unsigned char *value;

	BUG_ON(offset < 0);
	options += offset;
	opt_ptr = options;
	if (opt_ptr >= opt_end)
		return -1;

	while (opt_ptr != opt_end) {
		opt   = *opt_ptr++;
		len   = 0;
		value = NULL;

		/* Check if this isn't a single byte option */
		if (opt > DCCPO_MAX_RESERVED) {
			if (opt_ptr == opt_end)
				goto out_invalid_option;

			len = *opt_ptr++;
			if (len < 3)
				goto out_invalid_option;
			/*
			 * Remove the type and len fields, leaving
			 * just the value size
			 */
			len     -= 2;
			value   = opt_ptr;
			opt_ptr += len;

			/* option value must not run past the header */
			if (opt_ptr > opt_end)
				goto out_invalid_option;
		}

		switch (opt) {
		case DCCPO_ACK_VECTOR_0:
		case DCCPO_ACK_VECTOR_1:
			*vec	= value;
			*veclen = len;
			return offset + (opt_ptr - options);
		}
	}

	return -1;

out_invalid_option:
	DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
	return -1;
}
327
/* Stop the retransmission timer (drops the sock reference it held). */
static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	sk_stop_timer(sk, &hc->tx_rtotimer);
	ccid2_pr_debug("deleted RTO timer\n");
}
335
/*
 * ccid2_new_ack  -  Process the acknowledgement of one packet
 * @sk:      the sending socket
 * @seqp:    history entry of the newly acknowledged packet
 * @maxincr: in/out: remaining cwnd increments allowed for this Ack
 *           (bounds slow-start growth per acknowledgement)
 *
 * Grows cwnd (slow start: +1 per 2 acked packets, bounded by *maxincr;
 * congestion avoidance: +1 per cwnd acked packets), takes an RTT sample
 * at most once per SRTT, updates SRTT/RTTVAR with the usual 7/8 and 3/4
 * exponential filters (in jiffies), derives RTO = SRTT + 4*RTTVAR clamped
 * to [1s, 60s], and restarts the RTO timer.
 */
static inline void ccid2_new_ack(struct sock *sk,
				 struct ccid2_seq *seqp,
				 unsigned int *maxincr)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (hc->tx_cwnd < hc->tx_ssthresh) {
		/* slow start: at most *maxincr increments per Ack */
		if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
			hc->tx_cwnd += 1;
			*maxincr    -= 1;
			hc->tx_packets_acked = 0;
		}
	} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
			/* congestion avoidance: one increment per window */
			hc->tx_cwnd += 1;
			hc->tx_packets_acked = 0;
	}

	/* update RTO */
	if (hc->tx_srtt == -1 ||
	    time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) {
		/* raw RTT sample: now minus the packet's send time */
		unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
		int s;

		/* first measurement */
		if (hc->tx_srtt == -1) {
			ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
				       r, jiffies,
				       (unsigned long long)seqp->ccid2s_seq);
			ccid2_change_srtt(hc, r);
			hc->tx_rttvar = r >> 1;
		} else {
			/* RTTVAR: rttvar = 3/4 rttvar + 1/4 |srtt - r| */
			long tmp = hc->tx_srtt - r;
			long srtt;

			if (tmp < 0)
				tmp *= -1;

			tmp >>= 2;
			hc->tx_rttvar *= 3;
			hc->tx_rttvar >>= 2;
			hc->tx_rttvar += tmp;

			/* SRTT: srtt = 7/8 srtt + 1/8 r */
			srtt = hc->tx_srtt;
			srtt *= 7;
			srtt >>= 3;
			tmp = r >> 3;
			srtt += tmp;
			ccid2_change_srtt(hc, srtt);
		}
		s = hc->tx_rttvar << 2;
		/* clock granularity is 1 when based on jiffies */
		if (!s)
			s = 1;
		hc->tx_rto = hc->tx_srtt + s;

		/* must be at least a second */
		s = hc->tx_rto / HZ;
		/* DCCP doesn't require this [but I like it cuz my code sux] */
		if (s < 1)
			hc->tx_rto = HZ;
		/* max 60 seconds */
		if (s > 60)
			hc->tx_rto = HZ * 60;

		hc->tx_lastrtt = jiffies;

		ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
			       hc->tx_srtt, hc->tx_rttvar,
			       hc->tx_rto, HZ, r);
	}

	/* we got a new ack, so re-start RTO timer */
	ccid2_hc_tx_kill_rto_timer(sk);
	ccid2_start_rto_timer(sk);
}
413
/*
 * Account for one packet leaving the network (acked or deemed lost).
 * When nothing is in flight any more the RTO timer is stopped.
 * Note: on the (buggy) underflow path pipe stays 0, so the second test
 * still stops the timer.
 */
static void ccid2_hc_tx_dec_pipe(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (hc->tx_pipe == 0)
		DCCP_BUG("pipe == 0");
	else
		hc->tx_pipe--;

	if (hc->tx_pipe == 0)
		ccid2_hc_tx_kill_rto_timer(sk);
}
426
427static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
428{
429	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
430
431	if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) {
432		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
433		return;
434	}
435
436	hc->tx_last_cong = jiffies;
437
438	hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
439	hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);
440
441	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
442	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
443		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
444}
445
/*
 * ccid2_hc_tx_packet_recv  -  Main feedback processing of the CCID-2 sender
 * @sk:  the sending socket
 * @skb: the received (Data)Ack packet
 *
 * In order:
 *  1. Reverse-path congestion: counts non-consecutive sequence numbers as
 *     "dup-acks"; after NUMDUPACK of them the local Ack Ratio is doubled
 *     to slow down the acknowledgement stream.
 *  2. Walks all Ack Vector options, marking history entries acked,
 *     treating ECN-marked entries as congestion events and shrinking
 *     pipe for every newly covered packet.
 *  3. NewReno-style loss detection: with NUMDUPACK acknowledgements seen,
 *     everything unacked below the NUMDUPACK-th acked packet is declared
 *     lost (one congestion event, pipe reduced per packet).
 *  4. Trims the acked tail of the history ring.
 */
static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	u64 ackno, seqno;
	struct ccid2_seq *seqp;
	unsigned char *vector;
	unsigned char veclen;
	int offset = 0;
	int done = 0;
	unsigned int maxincr = 0;

	ccid2_hc_tx_check_sanity(hc);
	/* check reverse path congestion */
	seqno = DCCP_SKB_CB(skb)->dccpd_seq;

	/* need to bootstrap */
	if (hc->tx_rpdupack == -1) {
		hc->tx_rpdupack = 0;
		hc->tx_rpseq    = seqno;
	} else {
		/* check if packet is consecutive */
		if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
			hc->tx_rpseq = seqno;
		/* it's a later packet */
		else if (after48(seqno, hc->tx_rpseq)) {
			hc->tx_rpdupack++;

			/* check if we got enough dupacks */
			if (hc->tx_rpdupack >= NUMDUPACK) {
				hc->tx_rpdupack = -1;	/* restart bootstrap */
				hc->tx_rpseq    = 0;

				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
			}
		}
	}

	/* check forward path congestion */
	/* still didn't send out new data packets */
	if (hc->tx_seqh == hc->tx_seqt)
		return;

	/* only packet types carrying an Acknowledgement Number matter */
	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_ACK:
	case DCCP_PKT_DATAACK:
		break;
	default:
		return;
	}

	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	if (after48(ackno, hc->tx_high_ack))
		hc->tx_high_ack = ackno;

	/* position seqp on the newest history entry not above ackno */
	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, ackno)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}

	/*
	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
	 * packets per acknowledgement. Rounding up avoids that cwnd is not
	 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
	 */
	if (hc->tx_cwnd < hc->tx_ssthresh)
		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);

	/* go through all ack vectors */
	while ((offset = ccid2_ackvector(sk, skb, offset,
					 &vector, &veclen)) != -1) {
		/* go through this ack vector */
		while (veclen--) {
			/* each byte covers a run of rl+1 sequence numbers */
			const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
			u64 ackno_end_rl = SUB48(ackno, rl);

			ccid2_pr_debug("ackvec start:%llu end:%llu\n",
				       (unsigned long long)ackno,
				       (unsigned long long)ackno_end_rl);
			/* if the seqno we are analyzing is larger than the
			 * current ackno, then move towards the tail of our
			 * seqnos.
			 */
			while (after48(seqp->ccid2s_seq, ackno)) {
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			/* check all seqnos in the range of the vector
			 * run length
			 */
			while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
				const u8 state = *vector &
						 DCCP_ACKVEC_STATE_MASK;

				/* new packet received or marked */
				if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
				    !seqp->ccid2s_acked) {
					if (state ==
					    DCCP_ACKVEC_STATE_ECN_MARKED) {
						ccid2_congestion_event(sk,
								       seqp);
					} else
						ccid2_new_ack(sk, seqp,
							      &maxincr);

					seqp->ccid2s_acked = 1;
					ccid2_pr_debug("Got ack for %llu\n",
						       (unsigned long long)seqp->ccid2s_seq);
					ccid2_hc_tx_dec_pipe(sk);
				}
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			ackno = SUB48(ackno_end_rl, 1);
			vector++;
		}
		if (done)
			break;
	}

	/* The state about what is acked should be correct now
	 * Check for NUMDUPACK
	 */
	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}
	done = 0;
	/* walk backwards counting acked packets (NewReno dupack threshold) */
	while (1) {
		if (seqp->ccid2s_acked) {
			done++;
			if (done == NUMDUPACK)
				break;
		}
		if (seqp == hc->tx_seqt)
			break;
		seqp = seqp->ccid2s_prev;
	}

	/* If there are at least 3 acknowledgements, anything unacknowledged
	 * below the last sequence number is considered lost
	 */
	if (done == NUMDUPACK) {
		struct ccid2_seq *last_acked = seqp;

		/* check for lost packets */
		while (1) {
			if (!seqp->ccid2s_acked) {
				ccid2_pr_debug("Packet lost: %llu\n",
					       (unsigned long long)seqp->ccid2s_seq);
				ccid2_congestion_event(sk, seqp);
				ccid2_hc_tx_dec_pipe(sk);
			}
			if (seqp == hc->tx_seqt)
				break;
			seqp = seqp->ccid2s_prev;
		}

		hc->tx_seqt = last_acked;
	}

	/* trim acked packets in tail */
	while (hc->tx_seqt != hc->tx_seqh) {
		if (!hc->tx_seqt->ccid2s_acked)
			break;

		hc->tx_seqt = hc->tx_seqt->ccid2s_next;
	}

	ccid2_hc_tx_check_sanity(hc);
}
637
638static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
639{
640	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
641	struct dccp_sock *dp = dccp_sk(sk);
642	u32 max_ratio;
643
644	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
645	hc->tx_ssthresh = ~0U;
646
647	/*
648	 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
649	 * packets for new connections, following the rules from [RFC3390]".
650	 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
651	 */
652	hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
653
654	/* Make sure that Ack Ratio is enabled and within bounds. */
655	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
656	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
657		dp->dccps_l_ack_ratio = max_ratio;
658
659	if (ccid2_hc_tx_alloc_seq(hc))
660		return -ENOMEM;
661
662	hc->tx_rto	 = 3 * HZ;
663	ccid2_change_srtt(hc, -1);
664	hc->tx_rttvar    = -1;
665	hc->tx_rpdupack  = -1;
666	hc->tx_last_cong = jiffies;
667	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
668			(unsigned long)sk);
669
670	ccid2_hc_tx_check_sanity(hc);
671	return 0;
672}
673
674static void ccid2_hc_tx_exit(struct sock *sk)
675{
676	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
677	int i;
678
679	ccid2_hc_tx_kill_rto_timer(sk);
680
681	for (i = 0; i < hc->tx_seqbufc; i++)
682		kfree(hc->tx_seqbuf[i]);
683	hc->tx_seqbufc = 0;
684}
685
686static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
687{
688	const struct dccp_sock *dp = dccp_sk(sk);
689	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);
690
691	switch (DCCP_SKB_CB(skb)->dccpd_type) {
692	case DCCP_PKT_DATA:
693	case DCCP_PKT_DATAACK:
694		hc->rx_data++;
695		if (hc->rx_data >= dp->dccps_r_ack_ratio) {
696			dccp_send_ack(sk);
697			hc->rx_data = 0;
698		}
699		break;
700	}
701}
702
/* CCID-2 ("TCP-like") operations table registered with the DCCP core */
struct ccid_operations ccid2_ops = {
	.ccid_id		= DCCPC_CCID2,
	.ccid_name		= "TCP-like",
	.ccid_hc_tx_obj_size	= sizeof(struct ccid2_hc_tx_sock),
	.ccid_hc_tx_init	= ccid2_hc_tx_init,
	.ccid_hc_tx_exit	= ccid2_hc_tx_exit,
	.ccid_hc_tx_send_packet	= ccid2_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	= ccid2_hc_tx_packet_sent,
	.ccid_hc_tx_packet_recv	= ccid2_hc_tx_packet_recv,
	.ccid_hc_rx_obj_size	= sizeof(struct ccid2_hc_rx_sock),
	.ccid_hc_rx_packet_recv	= ccid2_hc_rx_packet_recv,
};
715
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
/* Expose the debug switch as a writable module parameter */
module_param(ccid2_debug, bool, 0644);
MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
#endif
720