1// SPDX-License-Identifier: GPL-2.0-or-later
2/* SCTP kernel implementation
3 * (C) Copyright IBM Corp. 2001, 2004
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
6 * Copyright (c) 2001-2003 Intel Corp.
7 *
8 * This file is part of the SCTP kernel implementation
9 *
 * These functions implement the sctp_outq class.  The outqueue handles
11 * bundling and queueing of outgoing SCTP chunks.
12 *
13 * Please send any bug reports or fixes you make to the
14 * email address(es):
15 *    lksctp developers <linux-sctp@vger.kernel.org>
16 *
17 * Written or modified by:
18 *    La Monte H.P. Yarroll <piggy@acm.org>
19 *    Karl Knutson          <karl@athena.chicago.il.us>
20 *    Perry Melange         <pmelange@null.cc.uic.edu>
21 *    Xingang Guo           <xingang.guo@intel.com>
22 *    Hui Huang 	    <hui.huang@nokia.com>
23 *    Sridhar Samudrala     <sri@us.ibm.com>
24 *    Jon Grimm             <jgrimm@us.ibm.com>
25 */
26
27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28
29#include <linux/types.h>
30#include <linux/list.h>   /* For struct list_head */
31#include <linux/socket.h>
32#include <linux/ip.h>
33#include <linux/slab.h>
34#include <net/sock.h>	  /* For skb_set_owner_w */
35
36#include <net/sctp/sctp.h>
37#include <net/sctp/sm.h>
38#include <net/sctp/stream_sched.h>
39#include <trace/events/sctp.h>
40
41/* Declare internal functions here.  */
42static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
43static void sctp_check_transmitted(struct sctp_outq *q,
44				   struct list_head *transmitted_queue,
45				   struct sctp_transport *transport,
46				   union sctp_addr *saddr,
47				   struct sctp_sackhdr *sack,
48				   __u32 *highest_new_tsn);
49
50static void sctp_mark_missing(struct sctp_outq *q,
51			      struct list_head *transmitted_queue,
52			      struct sctp_transport *transport,
53			      __u32 highest_new_tsn,
54			      int count_of_newacks);
55
56static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
57
58/* Add data to the front of the queue. */
59static inline void sctp_outq_head_data(struct sctp_outq *q,
60				       struct sctp_chunk *ch)
61{
62	struct sctp_stream_out_ext *oute;
63	__u16 stream;
64
65	list_add(&ch->list, &q->out_chunk_list);
66	q->out_qlen += ch->skb->len;
67
68	stream = sctp_chunk_stream_no(ch);
69	oute = SCTP_SO(&q->asoc->stream, stream)->ext;
70	list_add(&ch->stream_list, &oute->outq);
71}
72
73/* Take data from the front of the queue. */
74static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
75{
76	return q->sched->dequeue(q);
77}
78
79/* Add data chunk to the end of the queue. */
80static inline void sctp_outq_tail_data(struct sctp_outq *q,
81				       struct sctp_chunk *ch)
82{
83	struct sctp_stream_out_ext *oute;
84	__u16 stream;
85
86	list_add_tail(&ch->list, &q->out_chunk_list);
87	q->out_qlen += ch->skb->len;
88
89	stream = sctp_chunk_stream_no(ch);
90	oute = SCTP_SO(&q->asoc->stream, stream)->ext;
91	list_add_tail(&ch->stream_list, &oute->outq);
92}
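
/* Note: data chunks are tracked on two lists at once.  q->out_chunk_list
 * keeps the overall queueing order and drives the out_qlen byte count, while
 * the per-stream ext->outq list lets stream schedulers (e.g. priority or
 * round-robin) pick the next chunk per stream in sctp_outq_dequeue_data().
 */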
93
94/*
95 * SFR-CACC algorithm:
96 * D) If count_of_newacks is greater than or equal to 2
97 * and t was not sent to the current primary then the
98 * sender MUST NOT increment missing report count for t.
99 */
100static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
101				       struct sctp_transport *transport,
102				       int count_of_newacks)
103{
104	if (count_of_newacks >= 2 && transport != primary)
105		return 1;
106	return 0;
107}
108
109/*
110 * SFR-CACC algorithm:
111 * F) If count_of_newacks is less than 2, let d be the
112 * destination to which t was sent. If cacc_saw_newack
113 * is 0 for destination d, then the sender MUST NOT
114 * increment missing report count for t.
115 */
116static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
117				       int count_of_newacks)
118{
119	if (count_of_newacks < 2 &&
120			(transport && !transport->cacc.cacc_saw_newack))
121		return 1;
122	return 0;
123}
124
125/*
126 * SFR-CACC algorithm:
127 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
128 * execute steps C, D, F.
129 *
130 * C has been implemented in sctp_outq_sack
131 */
132static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
133				     struct sctp_transport *transport,
134				     int count_of_newacks)
135{
136	if (!primary->cacc.cycling_changeover) {
137		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
138			return 1;
139		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
140			return 1;
141		return 0;
142	}
143	return 0;
144}
145
146/*
147 * SFR-CACC algorithm:
148 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
149 * than next_tsn_at_change of the current primary, then
150 * the sender MUST NOT increment missing report count
151 * for t.
152 */
153static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
154{
155	if (primary->cacc.cycling_changeover &&
156	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
157		return 1;
158	return 0;
159}
160
161/*
162 * SFR-CACC algorithm:
163 * 3) If the missing report count for TSN t is to be
164 * incremented according to [RFC2960] and
165 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
166 * then the sender MUST further execute steps 3.1 and
167 * 3.2 to determine if the missing report count for
168 * TSN t SHOULD NOT be incremented.
169 *
170 * 3.3) If 3.1 and 3.2 do not dictate that the missing
171 * report count for t should not be incremented, then
172 * the sender SHOULD increment missing report count for
173 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
174 */
175static inline int sctp_cacc_skip(struct sctp_transport *primary,
176				 struct sctp_transport *transport,
177				 int count_of_newacks,
178				 __u32 tsn)
179{
180	if (primary->cacc.changeover_active &&
181	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
182	     sctp_cacc_skip_3_2(primary, tsn)))
183		return 1;
184	return 0;
185}
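
/* Illustrative example: with a changeover active and CYCLING_CHANGEOVER
 * clear, a SACK that newly acks data on two or more destinations
 * (count_of_newacks >= 2) means the missing report count is left alone for
 * TSNs last sent on a non-primary transport (step D above); with fewer new
 * acks, TSNs sent to a destination that saw no new ack are skipped instead
 * (step F above).
 */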
186
187/* Initialize an existing sctp_outq.  This does the boring stuff.
188 * You still need to define handlers if you really want to DO
189 * something with this structure...
190 */
191void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
192{
193	memset(q, 0, sizeof(struct sctp_outq));
194
195	q->asoc = asoc;
196	INIT_LIST_HEAD(&q->out_chunk_list);
197	INIT_LIST_HEAD(&q->control_chunk_list);
198	INIT_LIST_HEAD(&q->retransmit);
199	INIT_LIST_HEAD(&q->sacked);
200	INIT_LIST_HEAD(&q->abandoned);
201	sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
202}
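
/* After initialization the queue is uncorked (cork is zeroed by the memset
 * above) and data dequeueing follows the socket's default stream scheduler
 * until a different one is selected.
 */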
203
/* Throw away any chunks still pending on the outqueue.  The sctp_outq
 * structure itself is not freed here.
 */
206static void __sctp_outq_teardown(struct sctp_outq *q)
207{
208	struct sctp_transport *transport;
209	struct list_head *lchunk, *temp;
210	struct sctp_chunk *chunk, *tmp;
211
212	/* Throw away unacknowledged chunks. */
213	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
214			transports) {
215		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
216			chunk = list_entry(lchunk, struct sctp_chunk,
217					   transmitted_list);
218			/* Mark as part of a failed message. */
219			sctp_chunk_fail(chunk, q->error);
220			sctp_chunk_free(chunk);
221		}
222	}
223
224	/* Throw away chunks that have been gap ACKed.  */
225	list_for_each_safe(lchunk, temp, &q->sacked) {
226		list_del_init(lchunk);
227		chunk = list_entry(lchunk, struct sctp_chunk,
228				   transmitted_list);
229		sctp_chunk_fail(chunk, q->error);
230		sctp_chunk_free(chunk);
231	}
232
233	/* Throw away any chunks in the retransmit queue. */
234	list_for_each_safe(lchunk, temp, &q->retransmit) {
235		list_del_init(lchunk);
236		chunk = list_entry(lchunk, struct sctp_chunk,
237				   transmitted_list);
238		sctp_chunk_fail(chunk, q->error);
239		sctp_chunk_free(chunk);
240	}
241
242	/* Throw away any chunks that are in the abandoned queue. */
243	list_for_each_safe(lchunk, temp, &q->abandoned) {
244		list_del_init(lchunk);
245		chunk = list_entry(lchunk, struct sctp_chunk,
246				   transmitted_list);
247		sctp_chunk_fail(chunk, q->error);
248		sctp_chunk_free(chunk);
249	}
250
251	/* Throw away any leftover data chunks. */
252	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
253		sctp_sched_dequeue_done(q, chunk);
254
255		/* Mark as send failure. */
256		sctp_chunk_fail(chunk, q->error);
257		sctp_chunk_free(chunk);
258	}
259
260	/* Throw away any leftover control chunks. */
261	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
262		list_del_init(&chunk->list);
263		sctp_chunk_free(chunk);
264	}
265}
266
267void sctp_outq_teardown(struct sctp_outq *q)
268{
269	__sctp_outq_teardown(q);
270	sctp_outq_init(q->asoc, q);
271}
272
273/* Free the outqueue structure and any related pending chunks.  */
274void sctp_outq_free(struct sctp_outq *q)
275{
276	/* Throw away leftover chunks. */
277	__sctp_outq_teardown(q);
278}
279
280/* Put a new chunk in an sctp_outq.  */
281void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
282{
283	struct net *net = q->asoc->base.net;
284
285	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
286		 chunk && chunk->chunk_hdr ?
287		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
288		 "illegal chunk");
289
	/* If it is data, queue it up; otherwise, send it
	 * immediately.
	 */
293	if (sctp_chunk_is_data(chunk)) {
294		pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
295			 __func__, q, chunk, chunk && chunk->chunk_hdr ?
296			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
297			 "illegal chunk");
298
299		sctp_outq_tail_data(q, chunk);
300		if (chunk->asoc->peer.prsctp_capable &&
301		    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
302			chunk->asoc->sent_cnt_removable++;
303		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
304			SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
305		else
306			SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
307	} else {
308		list_add_tail(&chunk->list, &q->control_chunk_list);
309		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
310	}
311
312	if (!q->cork)
313		sctp_outq_flush(q, 0, gfp);
314}
315
316/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
317 * and the abandoned list are in ascending order.
318 */
319static void sctp_insert_list(struct list_head *head, struct list_head *new)
320{
321	struct list_head *pos;
322	struct sctp_chunk *nchunk, *lchunk;
323	__u32 ntsn, ltsn;
324	int done = 0;
325
326	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
327	ntsn = ntohl(nchunk->subh.data_hdr->tsn);
328
329	list_for_each(pos, head) {
330		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
331		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
332		if (TSN_lt(ntsn, ltsn)) {
333			list_add(new, pos->prev);
334			done = 1;
335			break;
336		}
337	}
338	if (!done)
339		list_add_tail(new, head);
340}
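
/* For example, inserting TSN 103 into a list holding 101, 102 and 104 places
 * it just before 104.  Comparisons use serial number arithmetic (TSN_lt), so
 * the ordering stays correct across the 32-bit TSN wrap.
 */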
341
342static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
343				  struct sctp_sndrcvinfo *sinfo,
344				  struct list_head *queue, int msg_len)
345{
346	struct sctp_chunk *chk, *temp;
347
348	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
349		struct sctp_stream_out *streamout;
350
351		if (!chk->msg->abandoned &&
352		    (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
353		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
354			continue;
355
356		chk->msg->abandoned = 1;
357		list_del_init(&chk->transmitted_list);
358		sctp_insert_list(&asoc->outqueue.abandoned,
359				 &chk->transmitted_list);
360
361		streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
362		asoc->sent_cnt_removable--;
363		asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
364		streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
365
366		if (queue != &asoc->outqueue.retransmit &&
367		    !chk->tsn_gap_acked) {
368			if (chk->transport)
369				chk->transport->flight_size -=
370						sctp_data_size(chk);
371			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
372		}
373
374		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
375		if (msg_len <= 0)
376			break;
377	}
378
379	return msg_len;
380}
381
382static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
383				    struct sctp_sndrcvinfo *sinfo, int msg_len)
384{
385	struct sctp_outq *q = &asoc->outqueue;
386	struct sctp_chunk *chk, *temp;
387	struct sctp_stream_out *sout;
388
389	q->sched->unsched_all(&asoc->stream);
390
391	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
392		if (!chk->msg->abandoned &&
393		    (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
394		     !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
395		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
396			continue;
397
398		chk->msg->abandoned = 1;
399		sctp_sched_dequeue_common(q, chk);
400		asoc->sent_cnt_removable--;
401		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
402
403		sout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
404		sout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
405
406		/* clear out_curr if all frag chunks are pruned */
407		if (asoc->stream.out_curr == sout &&
408		    list_is_last(&chk->frag_list, &chk->msg->chunks))
409			asoc->stream.out_curr = NULL;
410
411		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
412		sctp_chunk_free(chk);
413		if (msg_len <= 0)
414			break;
415	}
416
417	q->sched->sched_all(&asoc->stream);
418
419	return msg_len;
420}
421
/* Abandon the chunks according to their priorities */
423void sctp_prsctp_prune(struct sctp_association *asoc,
424		       struct sctp_sndrcvinfo *sinfo, int msg_len)
425{
426	struct sctp_transport *transport;
427
428	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
429		return;
430
431	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
432					 &asoc->outqueue.retransmit,
433					 msg_len);
434	if (msg_len <= 0)
435		return;
436
437	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
438			    transports) {
439		msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
440						 &transport->transmitted,
441						 msg_len);
442		if (msg_len <= 0)
443			return;
444	}
445
446	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
447}
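
/* Pruning order above: the retransmit queue first, then each transport's
 * transmitted queue, and only then chunks that were never sent.  msg_len is
 * the number of bytes still to be reclaimed; each helper returns the
 * remainder, and pruning stops once it drops to zero or below.
 */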
448
/* Mark all the eligible chunks on a transport for retransmission.  */
450void sctp_retransmit_mark(struct sctp_outq *q,
451			  struct sctp_transport *transport,
452			  __u8 reason)
453{
454	struct list_head *lchunk, *ltemp;
455	struct sctp_chunk *chunk;
456
457	/* Walk through the specified transmitted queue.  */
458	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
459		chunk = list_entry(lchunk, struct sctp_chunk,
460				   transmitted_list);
461
462		/* If the chunk is abandoned, move it to abandoned list. */
463		if (sctp_chunk_abandoned(chunk)) {
464			list_del_init(lchunk);
465			sctp_insert_list(&q->abandoned, lchunk);
466
			/* If this chunk has not been previously acked,
			 * stop considering it 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted.
			 */
472			if (!chunk->tsn_gap_acked) {
473				if (chunk->transport)
474					chunk->transport->flight_size -=
475							sctp_data_size(chunk);
476				q->outstanding_bytes -= sctp_data_size(chunk);
477				q->asoc->peer.rwnd += sctp_data_size(chunk);
478			}
479			continue;
480		}
481
		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
486		if ((reason == SCTP_RTXR_FAST_RTX  &&
487			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
488		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
489			/* RFC 2960 6.2.1 Processing a Received SACK
490			 *
491			 * C) Any time a DATA chunk is marked for
492			 * retransmission (via either T3-rtx timer expiration
493			 * (Section 6.3.3) or via fast retransmit
494			 * (Section 7.2.4)), add the data size of those
495			 * chunks to the rwnd.
496			 */
497			q->asoc->peer.rwnd += sctp_data_size(chunk);
498			q->outstanding_bytes -= sctp_data_size(chunk);
499			if (chunk->transport)
500				transport->flight_size -= sctp_data_size(chunk);
501
502			/* sctpimpguide-05 Section 2.8.2
503			 * M5) If a T3-rtx timer expires, the
504			 * 'TSN.Missing.Report' of all affected TSNs is set
505			 * to 0.
506			 */
507			chunk->tsn_missing_report = 0;
508
509			/* If a chunk that is being used for RTT measurement
510			 * has to be retransmitted, we cannot use this chunk
511			 * anymore for RTT measurements. Reset rto_pending so
512			 * that a new RTT measurement is started when a new
513			 * data chunk is sent.
514			 */
515			if (chunk->rtt_in_progress) {
516				chunk->rtt_in_progress = 0;
517				transport->rto_pending = 0;
518			}
519
520			/* Move the chunk to the retransmit queue. The chunks
521			 * on the retransmit queue are always kept in order.
522			 */
523			list_del_init(lchunk);
524			sctp_insert_list(&q->retransmit, lchunk);
525		}
526	}
527
528	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
529		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
530		 transport->cwnd, transport->ssthresh, transport->flight_size,
531		 transport->partial_bytes_acked);
532}
533
/* Mark all the eligible chunks on a transport for retransmission and force
535 * one packet out.
536 */
537void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
538		     enum sctp_retransmit_reason reason)
539{
540	struct net *net = q->asoc->base.net;
541
542	switch (reason) {
543	case SCTP_RTXR_T3_RTX:
544		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
545		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
546		/* Update the retran path if the T3-rtx timer has expired for
547		 * the current retran path.
548		 */
549		if (transport == transport->asoc->peer.retran_path)
550			sctp_assoc_update_retran_path(transport->asoc);
551		transport->asoc->rtx_data_chunks +=
552			transport->asoc->unack_data;
553		if (transport->pl.state == SCTP_PL_COMPLETE &&
554		    transport->asoc->unack_data)
555			sctp_transport_reset_probe_timer(transport);
556		break;
557	case SCTP_RTXR_FAST_RTX:
558		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
559		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
560		q->fast_rtx = 1;
561		break;
562	case SCTP_RTXR_PMTUD:
563		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
564		break;
565	case SCTP_RTXR_T1_RTX:
566		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
567		transport->asoc->init_retries++;
568		break;
569	default:
570		BUG();
571	}
572
573	sctp_retransmit_mark(q, transport, reason);
574
575	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
576	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
577	 * following the procedures outlined in C1 - C5.
578	 */
579	if (reason == SCTP_RTXR_T3_RTX)
580		q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
581
582	/* Flush the queues only on timeout, since fast_rtx is only
583	 * triggered during sack processing and the queue
584	 * will be flushed at the end.
585	 */
586	if (reason != SCTP_RTXR_FAST_RTX)
587		sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
588}
589
590/*
591 * Transmit DATA chunks on the retransmit queue.  Upon return from
592 * __sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
593 * need to be transmitted by the caller.
594 * We assume that pkt->transport has already been set.
595 *
596 * The return value is a normal kernel error return value.
597 */
598static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
599				 int rtx_timeout, int *start_timer, gfp_t gfp)
600{
601	struct sctp_transport *transport = pkt->transport;
602	struct sctp_chunk *chunk, *chunk1;
603	struct list_head *lqueue;
604	enum sctp_xmit status;
605	int error = 0;
606	int timer = 0;
607	int done = 0;
608	int fast_rtx;
609
610	lqueue = &q->retransmit;
611	fast_rtx = q->fast_rtx;
612
613	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to the opening of the window.
615	 *
616	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
617	 *
618	 * E3) Determine how many of the earliest (i.e., lowest TSN)
619	 * outstanding DATA chunks for the address for which the
620	 * T3-rtx has expired will fit into a single packet, subject
621	 * to the MTU constraint for the path corresponding to the
622	 * destination transport address to which the retransmission
623	 * is being sent (this may be different from the address for
624	 * which the timer expires [see Section 6.4]). Call this value
625	 * K. Bundle and retransmit those K DATA chunks in a single
626	 * packet to the destination endpoint.
627	 *
628	 * [Just to be painfully clear, if we are retransmitting
629	 * because a timeout just happened, we should send only ONE
630	 * packet of retransmitted data.]
631	 *
632	 * For fast retransmissions we also send only ONE packet.  However,
633	 * if we are just flushing the queue due to open window, we'll
634	 * try to send as much as possible.
635	 */
636	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
637		/* If the chunk is abandoned, move it to abandoned list. */
638		if (sctp_chunk_abandoned(chunk)) {
639			list_del_init(&chunk->transmitted_list);
640			sctp_insert_list(&q->abandoned,
641					 &chunk->transmitted_list);
642			continue;
643		}
644
645		/* Make sure that Gap Acked TSNs are not retransmitted.  A
646		 * simple approach is just to move such TSNs out of the
647		 * way and into a 'transmitted' queue and skip to the
648		 * next chunk.
649		 */
650		if (chunk->tsn_gap_acked) {
651			list_move_tail(&chunk->transmitted_list,
652				       &transport->transmitted);
653			continue;
654		}
655
		/* If we are doing fast retransmit, ignore chunks that are
		 * not marked for fast retransmit.
		 */
659		if (fast_rtx && !chunk->fast_retransmit)
660			continue;
661
662redo:
663		/* Attempt to append this chunk to the packet. */
664		status = sctp_packet_append_chunk(pkt, chunk);
665
666		switch (status) {
667		case SCTP_XMIT_PMTU_FULL:
668			if (!pkt->has_data && !pkt->has_cookie_echo) {
669				/* If this packet did not contain DATA then
670				 * retransmission did not happen, so do it
671				 * again.  We'll ignore the error here since
672				 * control chunks are already freed so there
673				 * is nothing we can do.
674				 */
675				sctp_packet_transmit(pkt, gfp);
676				goto redo;
677			}
678
679			/* Send this packet.  */
680			error = sctp_packet_transmit(pkt, gfp);
681
682			/* If we are retransmitting, we should only
683			 * send a single packet.
684			 * Otherwise, try appending this chunk again.
685			 */
686			if (rtx_timeout || fast_rtx)
687				done = 1;
688			else
689				goto redo;
690
691			/* Bundle next chunk in the next round.  */
692			break;
693
694		case SCTP_XMIT_RWND_FULL:
695			/* Send this packet. */
696			error = sctp_packet_transmit(pkt, gfp);
697
698			/* Stop sending DATA as there is no more room
699			 * at the receiver.
700			 */
701			done = 1;
702			break;
703
704		case SCTP_XMIT_DELAY:
705			/* Send this packet. */
706			error = sctp_packet_transmit(pkt, gfp);
707
708			/* Stop sending DATA because of nagle delay. */
709			done = 1;
710			break;
711
712		default:
713			/* The append was successful, so add this chunk to
714			 * the transmitted list.
715			 */
716			list_move_tail(&chunk->transmitted_list,
717				       &transport->transmitted);
718
719			/* Mark the chunk as ineligible for fast retransmit
720			 * after it is retransmitted.
721			 */
722			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
723				chunk->fast_retransmit = SCTP_DONT_FRTX;
724
725			q->asoc->stats.rtxchunks++;
726			break;
727		}
728
729		/* Set the timer if there were no errors */
730		if (!error && !timer)
731			timer = 1;
732
733		if (done)
734			break;
735	}
736
737	/* If we are here due to a retransmit timeout or a fast
738	 * retransmit and if there are any chunks left in the retransmit
739	 * queue that could not fit in the PMTU sized packet, they need
740	 * to be marked as ineligible for a subsequent fast retransmit.
741	 */
742	if (rtx_timeout || fast_rtx) {
743		list_for_each_entry(chunk1, lqueue, transmitted_list) {
744			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
745				chunk1->fast_retransmit = SCTP_DONT_FRTX;
746		}
747	}
748
749	*start_timer = timer;
750
751	/* Clear fast retransmit hint */
752	if (fast_rtx)
753		q->fast_rtx = 0;
754
755	return error;
756}
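
/* On return, *start_timer is non-zero once at least one retransmission was
 * queued or sent without a transmit error; sctp_outq_flush_rtx() uses it to
 * restart the T3-rtx timer on this transport.
 */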
757
/* Uncork the outqueue: clear the cork flag and flush any chunks that were
 * queued up while it was corked.
 */
759void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
760{
761	if (q->cork)
762		q->cork = 0;
763
764	sctp_outq_flush(q, 0, gfp);
765}
766
767static int sctp_packet_singleton(struct sctp_transport *transport,
768				 struct sctp_chunk *chunk, gfp_t gfp)
769{
770	const struct sctp_association *asoc = transport->asoc;
771	const __u16 sport = asoc->base.bind_addr.port;
772	const __u16 dport = asoc->peer.port;
773	const __u32 vtag = asoc->peer.i.init_tag;
774	struct sctp_packet singleton;
775
776	sctp_packet_init(&singleton, transport, sport, dport);
777	sctp_packet_config(&singleton, vtag, 0);
778	if (sctp_packet_append_chunk(&singleton, chunk) != SCTP_XMIT_OK) {
779		list_del_init(&chunk->list);
780		sctp_chunk_free(chunk);
781		return -ENOMEM;
782	}
783	return sctp_packet_transmit(&singleton, gfp);
784}
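
/* sctp_packet_singleton() is used for chunks that must travel alone in their
 * own packet, e.g. INIT, INIT ACK and SHUTDOWN COMPLETE, as well as
 * HEARTBEAT-based PMTU probes (see sctp_outq_flush_ctrl()).
 */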
785
786/* Struct to hold the context during sctp outq flush */
787struct sctp_flush_ctx {
788	struct sctp_outq *q;
	/* Transport currently being used; NOT necessarily the association's
	 * current active path.
	 */
790	struct sctp_transport *transport;
791	/* These transports have chunks to send. */
792	struct list_head transport_list;
793	struct sctp_association *asoc;
794	/* Packet on the current transport above */
795	struct sctp_packet *packet;
796	gfp_t gfp;
797};
798
/* Choose the transport for this chunk and update the flush context accordingly. */
800static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
801				       struct sctp_chunk *chunk)
802{
803	struct sctp_transport *new_transport = chunk->transport;
804
805	if (!new_transport) {
806		if (!sctp_chunk_is_data(chunk)) {
807			/* If we have a prior transport pointer, see if
808			 * the destination address of the chunk
809			 * matches the destination address of the
810			 * current transport.  If not a match, then
811			 * try to look up the transport with a given
812			 * destination address.  We do this because
813			 * after processing ASCONFs, we may have new
814			 * transports created.
815			 */
816			if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
817							&ctx->transport->ipaddr))
818				new_transport = ctx->transport;
819			else
820				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
821								  &chunk->dest);
822		}
823
824		/* if we still don't have a new transport, then
825		 * use the current active path.
826		 */
827		if (!new_transport)
828			new_transport = ctx->asoc->peer.active_path;
829	} else {
830		__u8 type;
831
832		switch (new_transport->state) {
833		case SCTP_INACTIVE:
834		case SCTP_UNCONFIRMED:
835		case SCTP_PF:
836			/* If the chunk is Heartbeat or Heartbeat Ack,
837			 * send it to chunk->transport, even if it's
838			 * inactive.
839			 *
840			 * 3.3.6 Heartbeat Acknowledgement:
841			 * ...
842			 * A HEARTBEAT ACK is always sent to the source IP
843			 * address of the IP datagram containing the
844			 * HEARTBEAT chunk to which this ack is responding.
845			 * ...
846			 *
847			 * ASCONF_ACKs also must be sent to the source.
848			 */
849			type = chunk->chunk_hdr->type;
850			if (type != SCTP_CID_HEARTBEAT &&
851			    type != SCTP_CID_HEARTBEAT_ACK &&
852			    type != SCTP_CID_ASCONF_ACK)
853				new_transport = ctx->asoc->peer.active_path;
854			break;
855		default:
856			break;
857		}
858	}
859
	/* Are we switching transports? If so, point the packet at the new
	 * transport and make sure it is on the ready-to-send list.
	 */
861	if (new_transport != ctx->transport) {
862		ctx->transport = new_transport;
863		ctx->packet = &ctx->transport->packet;
864
865		if (list_empty(&ctx->transport->send_ready))
866			list_add_tail(&ctx->transport->send_ready,
867				      &ctx->transport_list);
868
869		sctp_packet_config(ctx->packet,
870				   ctx->asoc->peer.i.init_tag,
871				   ctx->asoc->peer.ecn_capable);
872		/* We've switched transports, so apply the
873		 * Burst limit to the new transport.
874		 */
875		sctp_transport_burst_limited(ctx->transport);
876	}
877}
878
879static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
880{
881	struct sctp_chunk *chunk, *tmp;
882	enum sctp_xmit status;
883	int one_packet, error;
884
885	list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
886		one_packet = 0;
887
888		/* RFC 5061, 5.3
889		 * F1) This means that until such time as the ASCONF
890		 * containing the add is acknowledged, the sender MUST
891		 * NOT use the new IP address as a source for ANY SCTP
892		 * packet except on carrying an ASCONF Chunk.
893		 */
894		if (ctx->asoc->src_out_of_asoc_ok &&
895		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
896			continue;
897
898		list_del_init(&chunk->list);
899
		/* Pick the right transport to use.  The transport always
		 * changes for the first chunk, as we have no transport
		 * selected by then.
		 */
903		sctp_outq_select_transport(ctx, chunk);
904
905		switch (chunk->chunk_hdr->type) {
906		/* 6.10 Bundling
907		 *   ...
908		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
909		 *   COMPLETE with any other chunks.  [Send them immediately.]
910		 */
911		case SCTP_CID_INIT:
912		case SCTP_CID_INIT_ACK:
913		case SCTP_CID_SHUTDOWN_COMPLETE:
914			error = sctp_packet_singleton(ctx->transport, chunk,
915						      ctx->gfp);
916			if (error < 0) {
917				ctx->asoc->base.sk->sk_err = -error;
918				return;
919			}
920			ctx->asoc->stats.octrlchunks++;
921			break;
922
923		case SCTP_CID_ABORT:
924			if (sctp_test_T_bit(chunk))
925				ctx->packet->vtag = ctx->asoc->c.my_vtag;
926			fallthrough;
927
928		/* The following chunks are "response" chunks, i.e.
929		 * they are generated in response to something we
930		 * received.  If we are sending these, then we can
931		 * send only 1 packet containing these chunks.
932		 */
933		case SCTP_CID_HEARTBEAT_ACK:
934		case SCTP_CID_SHUTDOWN_ACK:
935		case SCTP_CID_COOKIE_ACK:
936		case SCTP_CID_COOKIE_ECHO:
937		case SCTP_CID_ERROR:
938		case SCTP_CID_ECN_CWR:
939		case SCTP_CID_ASCONF_ACK:
940			one_packet = 1;
941			fallthrough;
942
943		case SCTP_CID_HEARTBEAT:
944			if (chunk->pmtu_probe) {
945				error = sctp_packet_singleton(ctx->transport,
946							      chunk, ctx->gfp);
947				if (!error)
948					ctx->asoc->stats.octrlchunks++;
949				break;
950			}
951			fallthrough;
952		case SCTP_CID_SACK:
953		case SCTP_CID_SHUTDOWN:
954		case SCTP_CID_ECN_ECNE:
955		case SCTP_CID_ASCONF:
956		case SCTP_CID_FWD_TSN:
957		case SCTP_CID_I_FWD_TSN:
958		case SCTP_CID_RECONF:
959			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
960							    one_packet, ctx->gfp);
961			if (status != SCTP_XMIT_OK) {
962				/* put the chunk back */
963				list_add(&chunk->list, &ctx->q->control_chunk_list);
964				break;
965			}
966
967			ctx->asoc->stats.octrlchunks++;
968			/* PR-SCTP C5) If a FORWARD TSN is sent, the
969			 * sender MUST assure that at least one T3-rtx
970			 * timer is running.
971			 */
972			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
973			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
974				sctp_transport_reset_t3_rtx(ctx->transport);
975				ctx->transport->last_time_sent = jiffies;
976			}
977
978			if (chunk == ctx->asoc->strreset_chunk)
979				sctp_transport_reset_reconf_timer(ctx->transport);
980
981			break;
982
983		default:
984			/* We built a chunk with an illegal type! */
985			BUG();
986		}
987	}
988}
989
990/* Returns false if new data shouldn't be sent */
991static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
992				int rtx_timeout)
993{
994	int error, start_timer = 0;
995
996	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
997		return false;
998
999	if (ctx->transport != ctx->asoc->peer.retran_path) {
1000		/* Switch transports & prepare the packet.  */
1001		ctx->transport = ctx->asoc->peer.retran_path;
1002		ctx->packet = &ctx->transport->packet;
1003
1004		if (list_empty(&ctx->transport->send_ready))
1005			list_add_tail(&ctx->transport->send_ready,
1006				      &ctx->transport_list);
1007
1008		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
1009				   ctx->asoc->peer.ecn_capable);
1010	}
1011
1012	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
1013				      &start_timer, ctx->gfp);
1014	if (error < 0)
1015		ctx->asoc->base.sk->sk_err = -error;
1016
1017	if (start_timer) {
1018		sctp_transport_reset_t3_rtx(ctx->transport);
1019		ctx->transport->last_time_sent = jiffies;
1020	}
1021
1022	/* This can happen on COOKIE-ECHO resend.  Only
1023	 * one chunk can get bundled with a COOKIE-ECHO.
1024	 */
1025	if (ctx->packet->has_cookie_echo)
1026		return false;
1027
1028	/* Don't send new data if there is still data
1029	 * waiting to retransmit.
1030	 */
1031	if (!list_empty(&ctx->q->retransmit))
1032		return false;
1033
1034	return true;
1035}
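
/* A false return means new DATA must not be flushed yet: the retransmission
 * path is still unconfirmed, only a single chunk may ride along with a
 * COOKIE-ECHO, or chunks are still waiting on the retransmit queue and must
 * go out first (RFC 2960 6.1 C).
 */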
1036
1037static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
1038				 int rtx_timeout)
1039{
1040	struct sctp_chunk *chunk;
1041	enum sctp_xmit status;
1042
1043	/* Is it OK to send data chunks?  */
1044	switch (ctx->asoc->state) {
1045	case SCTP_STATE_COOKIE_ECHOED:
1046		/* Only allow bundling when this packet has a COOKIE-ECHO
1047		 * chunk.
1048		 */
1049		if (!ctx->packet || !ctx->packet->has_cookie_echo)
1050			return;
1051
1052		fallthrough;
1053	case SCTP_STATE_ESTABLISHED:
1054	case SCTP_STATE_SHUTDOWN_PENDING:
1055	case SCTP_STATE_SHUTDOWN_RECEIVED:
1056		break;
1057
1058	default:
1059		/* Do nothing. */
1060		return;
1061	}
1062
1063	/* RFC 2960 6.1  Transmission of DATA Chunks
1064	 *
1065	 * C) When the time comes for the sender to transmit,
1066	 * before sending new DATA chunks, the sender MUST
1067	 * first transmit any outstanding DATA chunks which
1068	 * are marked for retransmission (limited by the
1069	 * current cwnd).
1070	 */
1071	if (!list_empty(&ctx->q->retransmit) &&
1072	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
1073		return;
1074
1075	/* Apply Max.Burst limitation to the current transport in
1076	 * case it will be used for new data.  We are going to
	 * reset it before we return, but we want to apply the limit
1078	 * to the currently queued data.
1079	 */
1080	if (ctx->transport)
1081		sctp_transport_burst_limited(ctx->transport);
1082
1083	/* Finally, transmit new packets.  */
1084	while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
1085		__u32 sid = ntohs(chunk->subh.data_hdr->stream);
1086		__u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state;
1087
1088		/* Has this chunk expired? */
1089		if (sctp_chunk_abandoned(chunk)) {
1090			sctp_sched_dequeue_done(ctx->q, chunk);
1091			sctp_chunk_fail(chunk, 0);
1092			sctp_chunk_free(chunk);
1093			continue;
1094		}
1095
1096		if (stream_state == SCTP_STREAM_CLOSED) {
1097			sctp_outq_head_data(ctx->q, chunk);
1098			break;
1099		}
1100
1101		sctp_outq_select_transport(ctx, chunk);
1102
1103		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
1104			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
1105			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
1106			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
1107			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
1108			 refcount_read(&chunk->skb->users) : -1);
1109
1110		/* Add the chunk to the packet.  */
1111		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
1112						    ctx->gfp);
1113		if (status != SCTP_XMIT_OK) {
1114			/* We could not append this chunk, so put
1115			 * the chunk back on the output queue.
1116			 */
1117			pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
1118				 __func__, ntohl(chunk->subh.data_hdr->tsn),
1119				 status);
1120
1121			sctp_outq_head_data(ctx->q, chunk);
1122			break;
1123		}
1124
		/* If the sender is in the SHUTDOWN-PENDING state,
		 * it MAY set the I-bit in the DATA
		 * chunk header.
		 */
1129		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1130			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1131		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1132			ctx->asoc->stats.ouodchunks++;
1133		else
1134			ctx->asoc->stats.oodchunks++;
1135
1136		/* Only now it's safe to consider this
1137		 * chunk as sent, sched-wise.
1138		 */
1139		sctp_sched_dequeue_done(ctx->q, chunk);
1140
1141		list_add_tail(&chunk->transmitted_list,
1142			      &ctx->transport->transmitted);
1143
1144		sctp_transport_reset_t3_rtx(ctx->transport);
1145		ctx->transport->last_time_sent = jiffies;
1146
1147		/* Only let one DATA chunk get bundled with a
1148		 * COOKIE-ECHO chunk.
1149		 */
1150		if (ctx->packet->has_cookie_echo)
1151			break;
1152	}
1153}
1154
1155static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
1156{
1157	struct sock *sk = ctx->asoc->base.sk;
1158	struct list_head *ltransport;
1159	struct sctp_packet *packet;
1160	struct sctp_transport *t;
1161	int error = 0;
1162
1163	while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
1164		t = list_entry(ltransport, struct sctp_transport, send_ready);
1165		packet = &t->packet;
1166		if (!sctp_packet_empty(packet)) {
1167			rcu_read_lock();
1168			if (t->dst && __sk_dst_get(sk) != t->dst) {
1169				dst_hold(t->dst);
1170				sk_setup_caps(sk, t->dst);
1171			}
1172			rcu_read_unlock();
1173			error = sctp_packet_transmit(packet, ctx->gfp);
1174			if (error < 0)
1175				ctx->q->asoc->base.sk->sk_err = -error;
1176		}
1177
1178		/* Clear the burst limited state, if any */
1179		sctp_transport_burst_reset(t);
1180	}
1181}
1182
/* Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts, so appropriate
 * locking must be in place.  Today we use the sock lock to protect
 * this function.
 */
1191
1192static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1193{
1194	struct sctp_flush_ctx ctx = {
1195		.q = q,
1196		.transport = NULL,
1197		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
1198		.asoc = q->asoc,
1199		.packet = NULL,
1200		.gfp = gfp,
1201	};
1202
1203	/* 6.10 Bundling
1204	 *   ...
1205	 *   When bundling control chunks with DATA chunks, an
1206	 *   endpoint MUST place control chunks first in the outbound
1207	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
1208	 *   within a SCTP packet in increasing order of TSN.
1209	 *   ...
1210	 */
1211
1212	sctp_outq_flush_ctrl(&ctx);
1213
1214	if (q->asoc->src_out_of_asoc_ok)
1215		goto sctp_flush_out;
1216
1217	sctp_outq_flush_data(&ctx, rtx_timeout);
1218
1219sctp_flush_out:
1220
1221	sctp_outq_flush_transports(&ctx);
1222}
1223
1224/* Update unack_data based on the incoming SACK chunk */
1225static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1226					struct sctp_sackhdr *sack)
1227{
1228	union sctp_sack_variable *frags;
1229	__u16 unack_data;
1230	int i;
1231
1232	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1233
1234	frags = (union sctp_sack_variable *)(sack + 1);
1235	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1236		unack_data -= ((ntohs(frags[i].gab.end) -
1237				ntohs(frags[i].gab.start) + 1));
1238	}
1239
1240	assoc->unack_data = unack_data;
1241}
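
/* Worked example: with next_tsn = 120 and ctsn_ack_point = 109 there are
 * 120 - 109 - 1 = 10 TSNs unaccounted for; a single gap block [2,4]
 * acknowledges three of them, leaving unack_data = 7.
 */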
1242
1243/* This is where we REALLY process a SACK.
1244 *
1245 * Process the SACK against the outqueue.  Mostly, this just frees
1246 * things off the transmitted queue.
1247 */
1248int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1249{
1250	struct sctp_association *asoc = q->asoc;
1251	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
1252	struct sctp_transport *transport;
1253	struct sctp_chunk *tchunk = NULL;
1254	struct list_head *lchunk, *transport_list, *temp;
1255	__u32 sack_ctsn, ctsn, tsn;
1256	__u32 highest_tsn, highest_new_tsn;
1257	__u32 sack_a_rwnd;
1258	unsigned int outstanding;
1259	struct sctp_transport *primary = asoc->peer.primary_path;
1260	int count_of_newacks = 0;
1261	int gap_ack_blocks;
1262	u8 accum_moved = 0;
1263
1264	/* Grab the association's destination address list. */
1265	transport_list = &asoc->peer.transport_addr_list;
1266
1267	/* SCTP path tracepoint for congestion control debugging. */
1268	if (trace_sctp_probe_path_enabled()) {
1269		list_for_each_entry(transport, transport_list, transports)
1270			trace_sctp_probe_path(transport, asoc);
1271	}
1272
1273	sack_ctsn = ntohl(sack->cum_tsn_ack);
1274	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
1275	asoc->stats.gapcnt += gap_ack_blocks;
1276	/*
1277	 * SFR-CACC algorithm:
1278	 * On receipt of a SACK the sender SHOULD execute the
1279	 * following statements.
1280	 *
1281	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
1282	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
1283	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
1284	 * all destinations.
1285	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
1286	 * is set the receiver of the SACK MUST take the following actions:
1287	 *
1288	 * A) Initialize the cacc_saw_newack to 0 for all destination
1289	 * addresses.
1290	 *
1291	 * Only bother if changeover_active is set. Otherwise, this is
1292	 * totally suboptimal to do on every SACK.
1293	 */
1294	if (primary->cacc.changeover_active) {
1295		u8 clear_cycling = 0;
1296
1297		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
1298			primary->cacc.changeover_active = 0;
1299			clear_cycling = 1;
1300		}
1301
1302		if (clear_cycling || gap_ack_blocks) {
1303			list_for_each_entry(transport, transport_list,
1304					transports) {
1305				if (clear_cycling)
1306					transport->cacc.cycling_changeover = 0;
1307				if (gap_ack_blocks)
1308					transport->cacc.cacc_saw_newack = 0;
1309			}
1310		}
1311	}
1312
1313	/* Get the highest TSN in the sack. */
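	/* Gap Ack Block boundaries are offsets from the Cumulative TSN Ack,
	 * so adding the last block's 'end' to sack_ctsn below yields the
	 * highest TSN covered by this SACK (see also sctp_acked()).
	 */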
1314	highest_tsn = sack_ctsn;
1315	if (gap_ack_blocks) {
1316		union sctp_sack_variable *frags =
1317			(union sctp_sack_variable *)(sack + 1);
1318
1319		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
1320	}
1321
1322	if (TSN_lt(asoc->highest_sacked, highest_tsn))
1323		asoc->highest_sacked = highest_tsn;
1324
1325	highest_new_tsn = sack_ctsn;
1326
1327	/* Run through the retransmit queue.  Credit bytes received
1328	 * and free those chunks that we can.
1329	 */
1330	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
1331
1332	/* Run through the transmitted queue.
1333	 * Credit bytes received and free those chunks which we can.
1334	 *
1335	 * This is a MASSIVE candidate for optimization.
1336	 */
1337	list_for_each_entry(transport, transport_list, transports) {
1338		sctp_check_transmitted(q, &transport->transmitted,
1339				       transport, &chunk->source, sack,
1340				       &highest_new_tsn);
1341		/*
1342		 * SFR-CACC algorithm:
1343		 * C) Let count_of_newacks be the number of
1344		 * destinations for which cacc_saw_newack is set.
1345		 */
1346		if (transport->cacc.cacc_saw_newack)
1347			count_of_newacks++;
1348	}
1349
1350	/* Move the Cumulative TSN Ack Point if appropriate.  */
1351	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
1352		asoc->ctsn_ack_point = sack_ctsn;
1353		accum_moved = 1;
1354	}
1355
1356	if (gap_ack_blocks) {
1357
1358		if (asoc->fast_recovery && accum_moved)
1359			highest_new_tsn = highest_tsn;
1360
1361		list_for_each_entry(transport, transport_list, transports)
1362			sctp_mark_missing(q, &transport->transmitted, transport,
1363					  highest_new_tsn, count_of_newacks);
1364	}
1365
1366	/* Update unack_data field in the assoc. */
1367	sctp_sack_update_unack_data(asoc, sack);
1368
1369	ctsn = asoc->ctsn_ack_point;
1370
1371	/* Throw away stuff rotting on the sack queue.  */
1372	list_for_each_safe(lchunk, temp, &q->sacked) {
1373		tchunk = list_entry(lchunk, struct sctp_chunk,
1374				    transmitted_list);
1375		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1376		if (TSN_lte(tsn, ctsn)) {
1377			list_del_init(&tchunk->transmitted_list);
1378			if (asoc->peer.prsctp_capable &&
1379			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
1380				asoc->sent_cnt_removable--;
1381			sctp_chunk_free(tchunk);
1382		}
1383	}
1384
1385	/* ii) Set rwnd equal to the newly received a_rwnd minus the
1386	 *     number of bytes still outstanding after processing the
1387	 *     Cumulative TSN Ack and the Gap Ack Blocks.
1388	 */
1389
1390	sack_a_rwnd = ntohl(sack->a_rwnd);
1391	asoc->peer.zero_window_announced = !sack_a_rwnd;
1392	outstanding = q->outstanding_bytes;
1393
1394	if (outstanding < sack_a_rwnd)
1395		sack_a_rwnd -= outstanding;
1396	else
1397		sack_a_rwnd = 0;
1398
1399	asoc->peer.rwnd = sack_a_rwnd;
1400
1401	asoc->stream.si->generate_ftsn(q, sack_ctsn);
1402
1403	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
1404	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
1405		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
1406		 asoc->adv_peer_ack_point);
1407
1408	return sctp_outq_is_empty(q);
1409}
1410
1411/* Is the outqueue empty?
 * The queue is empty when there is no pending data, no in-flight data,
 * and no pending retransmissions.
1414 */
1415int sctp_outq_is_empty(const struct sctp_outq *q)
1416{
1417	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
1418	       list_empty(&q->retransmit);
1419}
1420
1421/********************************************************************
1422 * 2nd Level Abstractions
1423 ********************************************************************/
1424
1425/* Go through a transport's transmitted list or the association's retransmit
1426 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1427 * The retransmit list will not have an associated transport.
1428 *
1429 * I added coherent debug information output.	--xguo
1430 *
1431 * Instead of printing 'sacked' or 'kept' for each TSN on the
1432 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1433 * KEPT TSN6-TSN7, etc.
1434 */
1435static void sctp_check_transmitted(struct sctp_outq *q,
1436				   struct list_head *transmitted_queue,
1437				   struct sctp_transport *transport,
1438				   union sctp_addr *saddr,
1439				   struct sctp_sackhdr *sack,
1440				   __u32 *highest_new_tsn_in_sack)
1441{
1442	struct list_head *lchunk;
1443	struct sctp_chunk *tchunk;
1444	struct list_head tlist;
1445	__u32 tsn;
1446	__u32 sack_ctsn;
1447	__u32 rtt;
1448	__u8 restart_timer = 0;
1449	int bytes_acked = 0;
1450	int migrate_bytes = 0;
1451	bool forward_progress = false;
1452
1453	sack_ctsn = ntohl(sack->cum_tsn_ack);
1454
1455	INIT_LIST_HEAD(&tlist);
1456
1457	/* The while loop will skip empty transmitted queues. */
1458	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1459		tchunk = list_entry(lchunk, struct sctp_chunk,
1460				    transmitted_list);
1461
1462		if (sctp_chunk_abandoned(tchunk)) {
1463			/* Move the chunk to abandoned list. */
1464			sctp_insert_list(&q->abandoned, lchunk);
1465
1466			/* If this chunk has not been acked, stop
1467			 * considering it as 'outstanding'.
1468			 */
1469			if (transmitted_queue != &q->retransmit &&
1470			    !tchunk->tsn_gap_acked) {
1471				if (tchunk->transport)
1472					tchunk->transport->flight_size -=
1473							sctp_data_size(tchunk);
1474				q->outstanding_bytes -= sctp_data_size(tchunk);
1475			}
1476			continue;
1477		}
1478
1479		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1480		if (sctp_acked(sack, tsn)) {
1481			/* If this queue is the retransmit queue, the
1482			 * retransmit timer has already reclaimed
1483			 * the outstanding bytes for this chunk, so only
1484			 * count bytes associated with a transport.
1485			 */
1486			if (transport && !tchunk->tsn_gap_acked) {
1487				/* If this chunk is being used for RTT
1488				 * measurement, calculate the RTT and update
1489				 * the RTO using this value.
1490				 *
1491				 * 6.3.1 C5) Karn's algorithm: RTT measurements
1492				 * MUST NOT be made using packets that were
1493				 * retransmitted (and thus for which it is
1494				 * ambiguous whether the reply was for the
1495				 * first instance of the packet or a later
1496				 * instance).
1497				 */
1498				if (!sctp_chunk_retransmitted(tchunk) &&
1499				    tchunk->rtt_in_progress) {
1500					tchunk->rtt_in_progress = 0;
1501					rtt = jiffies - tchunk->sent_at;
1502					sctp_transport_update_rto(transport,
1503								  rtt);
1504				}
1505
1506				if (TSN_lte(tsn, sack_ctsn)) {
1507					/*
1508					 * SFR-CACC algorithm:
1509					 * 2) If the SACK contains gap acks
1510					 * and the flag CHANGEOVER_ACTIVE is
1511					 * set the receiver of the SACK MUST
1512					 * take the following action:
1513					 *
1514					 * B) For each TSN t being acked that
1515					 * has not been acked in any SACK so
1516					 * far, set cacc_saw_newack to 1 for
1517					 * the destination that the TSN was
1518					 * sent to.
1519					 */
1520					if (sack->num_gap_ack_blocks &&
1521					    q->asoc->peer.primary_path->cacc.
1522					    changeover_active)
1523						transport->cacc.cacc_saw_newack
1524							= 1;
1525				}
1526			}
1527
1528			/* If the chunk hasn't been marked as ACKED,
1529			 * mark it and account bytes_acked if the
1530			 * chunk had a valid transport (it will not
1531			 * have a transport if ASCONF had deleted it
1532			 * while DATA was outstanding).
1533			 */
1534			if (!tchunk->tsn_gap_acked) {
1535				tchunk->tsn_gap_acked = 1;
1536				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1537					*highest_new_tsn_in_sack = tsn;
1538				bytes_acked += sctp_data_size(tchunk);
1539				if (!tchunk->transport)
1540					migrate_bytes += sctp_data_size(tchunk);
1541				forward_progress = true;
1542			}
1543
1544			if (TSN_lte(tsn, sack_ctsn)) {
1545				/* RFC 2960  6.3.2 Retransmission Timer Rules
1546				 *
1547				 * R3) Whenever a SACK is received
1548				 * that acknowledges the DATA chunk
1549				 * with the earliest outstanding TSN
1550				 * for that address, restart T3-rtx
1551				 * timer for that address with its
1552				 * current RTO.
1553				 */
1554				restart_timer = 1;
1555				forward_progress = true;
1556
1557				list_add_tail(&tchunk->transmitted_list,
1558					      &q->sacked);
1559			} else {
1560				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
1561				 * M2) Each time a SACK arrives reporting
1562				 * 'Stray DATA chunk(s)' record the highest TSN
1563				 * reported as newly acknowledged, call this
1564				 * value 'HighestTSNinSack'. A newly
1565				 * acknowledged DATA chunk is one not
1566				 * previously acknowledged in a SACK.
1567				 *
1568				 * When the SCTP sender of data receives a SACK
1569				 * chunk that acknowledges, for the first time,
1570				 * the receipt of a DATA chunk, all the still
1571				 * unacknowledged DATA chunks whose TSN is
1572				 * older than that newly acknowledged DATA
1573				 * chunk, are qualified as 'Stray DATA chunks'.
1574				 */
1575				list_add_tail(lchunk, &tlist);
1576			}
1577		} else {
1578			if (tchunk->tsn_gap_acked) {
1579				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
1580					 __func__, tsn);
1581
1582				tchunk->tsn_gap_acked = 0;
1583
1584				if (tchunk->transport)
1585					bytes_acked -= sctp_data_size(tchunk);
1586
1587				/* RFC 2960 6.3.2 Retransmission Timer Rules
1588				 *
1589				 * R4) Whenever a SACK is received missing a
1590				 * TSN that was previously acknowledged via a
1591				 * Gap Ack Block, start T3-rtx for the
1592				 * destination address to which the DATA
1593				 * chunk was originally
1594				 * transmitted if it is not already running.
1595				 */
1596				restart_timer = 1;
1597			}
1598
1599			list_add_tail(lchunk, &tlist);
1600		}
1601	}
1602
1603	if (transport) {
1604		if (bytes_acked) {
1605			struct sctp_association *asoc = transport->asoc;
1606
1607			/* We may have counted DATA that was migrated
1608			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
1611			 * credited to this transport.
1612			 */
1613			bytes_acked -= migrate_bytes;
1614
1615			/* 8.2. When an outstanding TSN is acknowledged,
1616			 * the endpoint shall clear the error counter of
1617			 * the destination transport address to which the
1618			 * DATA chunk was last sent.
1619			 * The association's overall error counter is
1620			 * also cleared.
1621			 */
1622			transport->error_count = 0;
1623			transport->asoc->overall_error_count = 0;
1624			forward_progress = true;
1625
1626			/*
1627			 * While in SHUTDOWN PENDING, we may have started
1628			 * the T5 shutdown guard timer after reaching the
1629			 * retransmission limit. Stop that timer as soon
1630			 * as the receiver acknowledged any data.
1631			 */
1632			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1633			    del_timer(&asoc->timers
1634				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1635					sctp_association_put(asoc);
1636
1637			/* Mark the destination transport address as
1638			 * active if it is not so marked.
1639			 */
1640			if ((transport->state == SCTP_INACTIVE ||
1641			     transport->state == SCTP_UNCONFIRMED) &&
1642			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
1643				sctp_assoc_control_transport(
1644					transport->asoc,
1645					transport,
1646					SCTP_TRANSPORT_UP,
1647					SCTP_RECEIVED_SACK);
1648			}
1649
1650			sctp_transport_raise_cwnd(transport, sack_ctsn,
1651						  bytes_acked);
1652
1653			transport->flight_size -= bytes_acked;
1654			if (transport->flight_size == 0)
1655				transport->partial_bytes_acked = 0;
1656			q->outstanding_bytes -= bytes_acked + migrate_bytes;
1657		} else {
1658			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
1659			 * When a sender is doing zero window probing, it
1660			 * should not timeout the association if it continues
1661			 * to receive new packets from the receiver. The
1662			 * reason is that the receiver MAY keep its window
1663			 * closed for an indefinite time.
1664			 * A sender is doing zero window probing when the
1665			 * receiver's advertised window is zero, and there is
1666			 * only one data chunk in flight to the receiver.
1667			 *
1668			 * Allow the association to timeout while in SHUTDOWN
1669			 * PENDING or SHUTDOWN RECEIVED in case the receiver
1670			 * stays in zero window mode forever.
1671			 */
1672			if (!q->asoc->peer.rwnd &&
1673			    !list_empty(&tlist) &&
1674			    (sack_ctsn+2 == q->asoc->next_tsn) &&
1675			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1676				pr_debug("%s: sack received for zero window "
1677					 "probe:%u\n", __func__, sack_ctsn);
1678
1679				q->asoc->overall_error_count = 0;
1680				transport->error_count = 0;
1681			}
1682		}
1683
1684		/* RFC 2960 6.3.2 Retransmission Timer Rules
1685		 *
1686		 * R2) Whenever all outstanding data sent to an address have
1687		 * been acknowledged, turn off the T3-rtx timer of that
1688		 * address.
1689		 */
1690		if (!transport->flight_size) {
1691			if (del_timer(&transport->T3_rtx_timer))
1692				sctp_transport_put(transport);
1693		} else if (restart_timer) {
1694			if (!mod_timer(&transport->T3_rtx_timer,
1695				       jiffies + transport->rto))
1696				sctp_transport_hold(transport);
1697		}
1698
1699		if (forward_progress) {
1700			if (transport->dst)
1701				sctp_transport_dst_confirm(transport);
1702		}
1703	}
1704
1705	list_splice(&tlist, transmitted_queue);
1706}
1707
/* Mark chunks as missing; consequently they may get retransmitted. */
1709static void sctp_mark_missing(struct sctp_outq *q,
1710			      struct list_head *transmitted_queue,
1711			      struct sctp_transport *transport,
1712			      __u32 highest_new_tsn_in_sack,
1713			      int count_of_newacks)
1714{
1715	struct sctp_chunk *chunk;
1716	__u32 tsn;
1717	char do_fast_retransmit = 0;
1718	struct sctp_association *asoc = q->asoc;
1719	struct sctp_transport *primary = asoc->peer.primary_path;
1720
1721	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1722
1723		tsn = ntohl(chunk->subh.data_hdr->tsn);
1724
1725		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
1726		 * 'Unacknowledged TSN's', if the TSN number of an
1727		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
1728		 * value, increment the 'TSN.Missing.Report' count on that
1729		 * chunk if it has NOT been fast retransmitted or marked for
1730		 * fast retransmit already.
1731		 */
1732		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1733		    !chunk->tsn_gap_acked &&
1734		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
1735
1736			/* SFR-CACC may require us to skip marking
1737			 * this chunk as missing.
1738			 */
1739			if (!transport || !sctp_cacc_skip(primary,
1740						chunk->transport,
1741						count_of_newacks, tsn)) {
1742				chunk->tsn_missing_report++;
1743
1744				pr_debug("%s: tsn:0x%x missing counter:%d\n",
1745					 __func__, tsn, chunk->tsn_missing_report);
1746			}
1747		}
1748		/*
1749		 * M4) If any DATA chunk is found to have a
1750		 * 'TSN.Missing.Report'
1751		 * value larger than or equal to 3, mark that chunk for
1752		 * retransmission and start the fast retransmit procedure.
1753		 */
1754
1755		if (chunk->tsn_missing_report >= 3) {
1756			chunk->fast_retransmit = SCTP_NEED_FRTX;
1757			do_fast_retransmit = 1;
1758		}
1759	}
1760
1761	if (transport) {
1762		if (do_fast_retransmit)
1763			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1764
1765		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
1766			 "flight_size:%d, pba:%d\n",  __func__, transport,
1767			 transport->cwnd, transport->ssthresh,
1768			 transport->flight_size, transport->partial_bytes_acked);
1769	}
1770}
1771
1772/* Is the given TSN acked by this packet?  */
1773static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1774{
1775	__u32 ctsn = ntohl(sack->cum_tsn_ack);
1776	union sctp_sack_variable *frags;
1777	__u16 tsn_offset, blocks;
1778	int i;
1779
1780	if (TSN_lte(tsn, ctsn))
1781		goto pass;
1782
1783	/* 3.3.4 Selective Acknowledgment (SACK) (3):
1784	 *
1785	 * Gap Ack Blocks:
1786	 *  These fields contain the Gap Ack Blocks. They are repeated
1787	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
1788	 *  defined in the Number of Gap Ack Blocks field. All DATA
1789	 *  chunks with TSNs greater than or equal to (Cumulative TSN
1790	 *  Ack + Gap Ack Block Start) and less than or equal to
1791	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
1792	 *  Block are assumed to have been received correctly.
1793	 */
1794
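	/* For example, with ctsn = 100 and a gap block [start = 2, end = 4],
	 * TSNs 102..104 are covered: tsn = 103 gives tsn_offset = 3, which
	 * falls inside the block, so the TSN is treated as acked.
	 */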
1795	frags = (union sctp_sack_variable *)(sack + 1);
1796	blocks = ntohs(sack->num_gap_ack_blocks);
1797	tsn_offset = tsn - ctsn;
1798	for (i = 0; i < blocks; ++i) {
1799		if (tsn_offset >= ntohs(frags[i].gab.start) &&
1800		    tsn_offset <= ntohs(frags[i].gab.end))
1801			goto pass;
1802	}
1803
1804	return 0;
1805pass:
1806	return 1;
1807}
1808
1809static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1810				    int nskips, __be16 stream)
1811{
1812	int i;
1813
1814	for (i = 0; i < nskips; i++) {
1815		if (skiplist[i].stream == stream)
1816			return i;
1817	}
1818	return i;
1819}
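
/* Note: when the stream is not yet in the skip list, the index returned
 * equals nskips; sctp_generate_fwdtsn() then fills that slot and bumps
 * nskips, while an existing entry is simply overwritten with the newer SSN.
 */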
1820
1821/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
1822void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1823{
1824	struct sctp_association *asoc = q->asoc;
1825	struct sctp_chunk *ftsn_chunk = NULL;
1826	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
1827	int nskips = 0;
1828	int skip_pos = 0;
1829	__u32 tsn;
1830	struct sctp_chunk *chunk;
1831	struct list_head *lchunk, *temp;
1832
1833	if (!asoc->peer.prsctp_capable)
1834		return;
1835
1836	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
1837	 * received SACK.
1838	 *
1839	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
1840	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
1841	 */
1842	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1843		asoc->adv_peer_ack_point = ctsn;
1844
1845	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
1846	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
1847	 * the chunk next in the out-queue space is marked as "abandoned" as
1848	 * shown in the following example:
1849	 *
1850	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
1851	 * and the Advanced.Peer.Ack.Point is updated to this value:
1852	 *
1853	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
1854	 *   normal SACK processing           local advancement
1855	 *                ...                           ...
1856	 *   Adv.Ack.Pt-> 102 acked                     102 acked
1857	 *                103 abandoned                 103 abandoned
1858	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
1859	 *                105                           105
1860	 *                106 acked                     106 acked
1861	 *                ...                           ...
1862	 *
1863	 * In this example, the data sender successfully advanced the
1864	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
1865	 */
1866	list_for_each_safe(lchunk, temp, &q->abandoned) {
1867		chunk = list_entry(lchunk, struct sctp_chunk,
1868					transmitted_list);
1869		tsn = ntohl(chunk->subh.data_hdr->tsn);
1870
1871		/* Remove any chunks in the abandoned queue that are acked by
1872		 * the ctsn.
1873		 */
1874		if (TSN_lte(tsn, ctsn)) {
1875			list_del_init(lchunk);
1876			sctp_chunk_free(chunk);
1877		} else {
1878			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
1879				asoc->adv_peer_ack_point = tsn;
1880				if (chunk->chunk_hdr->flags &
1881					 SCTP_DATA_UNORDERED)
1882					continue;
1883				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
1884						nskips,
1885						chunk->subh.data_hdr->stream);
1886				ftsn_skip_arr[skip_pos].stream =
1887					chunk->subh.data_hdr->stream;
1888				ftsn_skip_arr[skip_pos].ssn =
1889					 chunk->subh.data_hdr->ssn;
1890				if (skip_pos == nskips)
1891					nskips++;
1892				if (nskips == 10)
1893					break;
1894			} else
1895				break;
1896		}
1897	}
1898
1899	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
1900	 * is greater than the Cumulative TSN ACK carried in the received
1901	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
1902	 * chunk containing the latest value of the
1903	 * "Advanced.Peer.Ack.Point".
1904	 *
1905	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
1906	 * list each stream and sequence number in the forwarded TSN. This
1907	 * information will enable the receiver to easily find any
1908	 * stranded TSN's waiting on stream reorder queues. Each stream
1909	 * SHOULD only be reported once; this means that if multiple
1910	 * abandoned messages occur in the same stream then only the
1911	 * highest abandoned stream sequence number is reported. If the
1912	 * total size of the FORWARD TSN does NOT fit in a single MTU then
1913	 * the sender of the FORWARD TSN SHOULD lower the
1914	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
1915	 * single MTU.
1916	 */
1917	if (asoc->adv_peer_ack_point > ctsn)
1918		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
1919					      nskips, &ftsn_skip_arr[0]);
1920
1921	if (ftsn_chunk) {
1922		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1923		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
1924	}
1925}
1926