// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement SCTP stream message interleaving, mostly
 * covering the processing of I-DATA and I-FORWARD-TSN chunks.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

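/* Build an empty I-DATA fragment for this association: the I-DATA
 * header is zeroed and only the stream id (plus the U bit, when
 * requested via sinfo_flags) is filled in here; ppid, mid and fsn are
 * assigned later by sctp_chunk_assign_mid() once the whole message
 * has been fragmented.
 */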
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}

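/* Assign the Message Identifier (MID) and, for non-first fragments,
 * the Fragment Sequence Number (FSN) to every fragment of the message
 * that @chunk belongs to.  Only the last fragment advances the
 * per-stream MID counter; the others merely peek at it, so all
 * fragments of one user message share the same MID.
 */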
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

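/* Sanity-check an inbound DATA chunk: an ordered chunk whose SSN is
 * older than the next expected SSN on its stream is rejected as a
 * duplicate.
 */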
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

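/* Same check for an inbound I-DATA chunk, using the 32-bit MID instead
 * of the 16-bit SSN.
 */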
static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

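/* Insert @event into the reassembly queue, which is kept sorted by
 * stream id, then MID, then FSN (first fragments sort ahead of the
 * rest of their message).  The tail checks handle the common in-order
 * arrival case before falling back to a full queue walk.
 */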
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}

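/* While partial delivery is in progress, try to extend the run of
 * fragments of the message currently being delivered on @event's
 * stream: collect consecutive middle fragments starting at the next
 * expected FSN and, if the run ends with a last fragment, complete the
 * message with MSG_EOR and leave partial delivery mode.
 */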
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

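/* Look for a complete first..last fragment run for @event's MID and
 * deliver it with MSG_EOR set.  If no complete message is found but
 * the data queued for the next expected MID has reached the socket's
 * partial delivery point (pd_point), start partial delivery on that
 * message instead.
 */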
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

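/* Reassembly entry point for ordered I-DATA: unfragmented messages are
 * delivered as-is; fragments are queued and retrieval is attempted,
 * first by continuing any in-progress partial delivery, then by
 * looking for a fully reassembled message.
 */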
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

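/* Park a complete but out-of-order message on the lobby, kept sorted
 * by stream id and MID, until the MID gap in its stream is filled.
 */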
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream) {
			loc = pos;
			break;
		}
		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}

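/* After delivering a message in order, pull from the lobby any
 * messages on the same stream that have become deliverable and append
 * them to the event list being flushed to the socket.
 */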
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream  = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

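/* Ordered delivery for I-DATA: if @event carries the next expected MID
 * for its stream, deliver it along with whatever the lobby releases;
 * otherwise park it on the lobby and deliver nothing.
 */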
static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream  = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

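/* Splice a list of ready events onto the socket receive queue and wake
 * up the reader, unless reception has been shut down or the event type
 * is not subscribed, in which case the list is purged.
 */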
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	skb_queue_splice_tail_init(skb_list,
				   &sk->sk_receive_queue);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	sctp_queue_purge_ulpevents(skb_list);

	return 0;
}

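/* The *_uo helpers below mirror the ordered reassembly path above for
 * unordered I-DATA: they operate on the reasm_uo queue and on the
 * per-stream mid_uo/fsn_uo/pd_mode_uo state, since unordered messages
 * are reassembled independently of the ordered MID sequence.
 */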
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

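/* Start partial delivery on the unordered queue: find the first stream
 * that is not already in unordered partial delivery mode and has a
 * first fragment queued, then deliver its leading run of fragments.
 */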
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}

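/* Main inbound I-DATA handler: turn @chunk into a ulpevent, record its
 * MID plus ppid or FSN, run it through the (un)ordered reassembly and
 * ordering machinery, and enqueue whatever becomes deliverable.
 * Returns 1 if a complete message (MSG_EOR) was enqueued, 0 otherwise,
 * or -ENOMEM.
 */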
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			if (event->msg_flags & MSG_EOR)
				event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		}
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, &temp);
	}

	return event_eor;
}

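/* Ordered counterpart of sctp_intl_retrieve_first_uo(): start partial
 * delivery for the first stream whose queued first fragment matches
 * the next expected MID.
 */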
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

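/* Enter partial delivery: repeatedly drain leading fragment runs from
 * the ordered and unordered reassembly queues until no further
 * progress can be made.
 */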
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}
}

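/* Renege when a new chunk does not fit into the receive buffer: free
 * enough not-yet-delivered data (lobby first, then both reassembly
 * queues) to make room, process the chunk, and if that still yields
 * no complete message fall back to partial delivery.
 */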
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);
}

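/* Queue a SCTP_PARTIAL_DELIVERY_ABORTED notification for (sid, mid)
 * directly on the socket receive queue, provided the user subscribed
 * to partial delivery events.
 */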
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
					SCTP_PARTIAL_DELIVERY_EVENT))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		struct sctp_sock *sp = sctp_sk(sk);

		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sp->data_ready_signalled) {
			sp->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

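/* After MIDs have been skipped on @sid, drain from the lobby all
 * messages with a MID below the new expected MID (or, failing that,
 * one message matching it exactly), pull in whatever then becomes
 * deliverable, and hand the batch to the socket.
 */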
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, &temp);
	}
}

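/* Abort partial delivery on every stream, for both the unordered and
 * ordered paths, then flush all pending queues.
 */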
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

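/* Return the index of the (stream, flags) entry in @skiplist, or
 * @nskips if there is none, i.e. the slot where a new entry goes.
 */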
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

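/* Generate an I-FORWARD-TSN chunk: advance adv_peer_ack_point across
 * consecutively abandoned TSNs beyond @ctsn, recording one skip entry
 * per (stream, flags) pair (at most 10), and queue the resulting
 * chunk on the control chunk list.
 */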
#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
	}
}

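/* Walk the skip entries of an I-FORWARD-TSN chunk; the bound keeps the
 * iterator from reading a truncated entry past the end of the chunk.
 */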
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = (void *)(chunk->subh.ifwdtsn_hdr + 1); \
	     (void *)pos <= (void *)(chunk->subh.ifwdtsn_hdr + 1) + (end) - \
			    sizeof(struct sctp_ifwdtsn_skip); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

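/* Validate FORWARD-TSN / I-FORWARD-TSN chunks: reject any chunk that
 * names a stream id beyond the association's inbound stream count.
 */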
static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge the fragmentation queue. */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in-progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge the fragmentation queue. */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* Abort partial delivery only when the skip covers all data. */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

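/* Process one I-FORWARD-TSN skip entry: abort any partial delivery
 * that the skipped MID overtakes and, for ordered streams, advance the
 * expected MID and reap newly deliverable lobby messages.
 */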
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream  = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_ulpq_tail_event(ulpq, &temp);
}

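/* Operations used when the peer did not negotiate interleaving: plain
 * DATA and FORWARD-TSN processing via the existing ulpq helpers.
 */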
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag		= sctp_make_datafrag_empty,
	.assign_number		= sctp_chunk_assign_ssn,
	.validate_data		= sctp_validate_data,
	.ulpevent_data		= sctp_ulpq_tail_data,
	.enqueue_event		= do_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_fwdtsn,
	.validate_ftsn		= sctp_validate_fwdtsn,
	.report_ftsn		= sctp_report_fwdtsn,
	.handle_ftsn		= sctp_handle_fwdtsn,
};

static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
				 struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_enqueue_event(ulpq, &temp);
}

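/* Operations used when user message interleaving was negotiated:
 * I-DATA and I-FORWARD-TSN processing as defined in RFC 8260.
 */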
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag		= sctp_make_idatafrag_empty,
	.assign_number		= sctp_chunk_assign_mid,
	.validate_data		= sctp_validate_idata,
	.ulpevent_data		= sctp_ulpevent_idata,
	.enqueue_event		= do_sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_iftsn,
	.validate_ftsn		= sctp_validate_iftsn,
	.report_ftsn		= sctp_report_iftsn,
	.handle_ftsn		= sctp_handle_iftsn,
};

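/* Select the ops table for this association according to whether the
 * peer advertised support for user message interleaving.
 */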
void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
					     : &sctp_stream_interleave_0;
}