/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netinet/sctp_indata.c 267732 2014-06-22 16:43:59Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue but that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it would otherwise be 0, to keep SWS avoidance
	 * engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
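
/*
 * Illustration of the arithmetic above (hypothetical numbers): with a
 * 64000-byte receive buffer, 4000 bytes held in 4 chunks on the
 * reassembly queue and nothing queued on the streams, the advertised
 * rwnd is the socket-buffer space minus (4000 + 4 * MSIZE), minus the
 * my_rwnd_control_len overhead; if what remains is smaller than that
 * overhead, 1 is reported instead of 0 (SWS avoidance).
 */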


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
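
/*
 * The mbuf built above thus carries up to three ancillary data objects
 * laid out back to back, each present only when the matching socket
 * option is enabled:
 *
 *   [cmsghdr|sctp_rcvinfo] [cmsghdr|sctp_nxtinfo]
 *   [cmsghdr|sctp_sndrcvinfo or sctp_extrcvinfo]
 */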


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
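
/*
 * Note on the two maps used above: a received TSN lives in exactly one
 * of them. mapping_array holds TSNs that could still be revoked
 * (renegable, when sctp_do_drain is enabled); nr_mapping_array holds
 * TSNs already handed to the socket buffer, which can no longer be
 * revoked. sctp_mark_non_revokable() moves a TSN from the first map
 * to the second.
 */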


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going away.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and not unordered, so we must wait.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue (via t_size), and 1 if all of
 * the message is ready or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
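
/*
 * For example, a queue holding consecutive TSNs 10 (FIRST), 11 and 12
 * (LAST) reports the sum of the three send_sizes and returns 1; if TSN 12
 * had not arrived yet, the walk would stop at the gap (or end of queue)
 * and return 0 with t_size holding only the deliverable prefix.
 */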

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
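			/*
			 * pd_point is the partial delivery threshold: the
			 * smaller of the endpoint's configured
			 * partial_delivery_point and the socket receive
			 * buffer limit scaled down by
			 * SCTP_PARTIAL_DELIVERY_SHIFT.
			 */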
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing the TSN down one, just in case
				 * we can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so, pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or a
				 * MIDDLE fragment, NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->str_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->ssn_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					snprintf(msg, sizeof(msg),
					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST, NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
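
/*
 * In short, the audits above admit only the legal fragment sequences
 * around the newly inserted chunk: after a LAST only a FIRST may follow,
 * after a FIRST/MIDDLE only a MIDDLE/LAST may follow, and adjacent
 * fragments of one message must agree on stream number, U-bit and (when
 * ordered) stream sequence number. Any violation aborts the association
 * with a protocol-violation cause.
 */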

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this,
 * but that is doubtful. It is too bad I must worry about evil crackers, sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
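
/*
 * For example, if the queue holds TSN 10 marked MIDDLE, an arriving
 * non-fragmented chunk with TSN 11 would land inside the message being
 * reassembled and is flagged as bogus (return 1); if TSN 10 were marked
 * LAST, TSN 11 may legitimately start a new message and 0 is returned.
 */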

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			/* Nope, not in the valid range; dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_NOWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepending works well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just params, and this one has
			 * two back-to-back phdrs: one with the error type
			 * and size, the other with the stream id and a
			 * reserved field.
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
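		/*
		 * On the wire the cause queued above is 8 bytes: a paramhdr
		 * with type SCTP_CAUSE_INVALID_STREAM and length 8, followed
		 * by two bytes carrying the offending stream id (already in
		 * network byte order) and two reserved zero bytes.
		 */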
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
1438	/*
1439	 * Before we continue lets validate that we are not being fooled by
1440	 * an evil attacker. We can only have 4k chunks based on our TSN
1441	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1442	 * way our stream sequence numbers could have wrapped. We of course
1443	 * only validate the FIRST fragment so the bit must be set.
1444	 */
1445	strmseq = ntohs(ch->dp.stream_sequence);
1446#ifdef SCTP_ASOCLOG_OF_TSNS
1447	SCTP_TCB_LOCK_ASSERT(stcb);
1448	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1449		asoc->tsn_in_at = 0;
1450		asoc->tsn_in_wrapped = 1;
1451	}
1452	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1453	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1454	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1455	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1456	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1457	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1458	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1459	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1460	asoc->tsn_in_at++;
1461#endif
1462	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1463	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1464	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1465	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1466		/* The incoming sseq is behind where we last delivered? */
1467		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1468		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1469
1470		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1471		    asoc->strmin[strmno].last_sequence_delivered,
1472		    tsn, strmno, strmseq);
1473		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1474		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1475		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1476		*abort_flag = 1;
1477		return (0);
1478	}
1479	/************************************
1480	 * From here down we may find ch-> invalid
1481	 * so its a good idea NOT to use it.
1482	 *************************************/
1483
1484	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1485	if (last_chunk == 0) {
1486		dmbuf = SCTP_M_COPYM(*m,
1487		    (offset + sizeof(struct sctp_data_chunk)),
1488		    the_len, M_NOWAIT);
1489#ifdef SCTP_MBUF_LOGGING
1490		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1491			struct mbuf *mat;
1492
1493			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1494				if (SCTP_BUF_IS_EXTENDED(mat)) {
1495					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1496				}
1497			}
1498		}
1499#endif
1500	} else {
1501		/* We can steal the last chunk */
1502		int l_len;
1503
1504		dmbuf = *m;
1505		/* lop off the top part */
1506		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1507		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1508			l_len = SCTP_BUF_LEN(dmbuf);
1509		} else {
1510			/*
1511			 * need to count up the size; hopefully we do not hit
1512			 * this too often :-0
1513			 */
1514			struct mbuf *lat;
1515
1516			l_len = 0;
1517			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1518				l_len += SCTP_BUF_LEN(lat);
1519			}
1520		}
1521		if (l_len > the_len) {
1522			/* Trim the padding bytes off the end too */
1523			m_adj(dmbuf, -(l_len - the_len));
1524		}
1525	}
1526	if (dmbuf == NULL) {
1527		SCTP_STAT_INCR(sctps_nomem);
1528		return (0);
1529	}
1530	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1531	    asoc->fragmented_delivery_inprogress == 0 &&
1532	    TAILQ_EMPTY(&asoc->resetHead) &&
1533	    ((ordered == 0) ||
1534	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1535	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1536		/* Candidate for express delivery */
1537		/*
1538		 * It's not fragmented, no PD-API is up, nothing is in the
1539		 * delivery queue, it's un-ordered OR ordered and the next to
1540		 * deliver AND nothing else is stuck on the stream queue,
1541		 * and there is room for it in the socket buffer. Let's just
1542		 * stuff it up the buffer....
1543		 */
1544
1545		/* It would be nice to avoid this copy if we could :< */
1546		sctp_alloc_a_readq(stcb, control);
1547		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1548		    protocol_id,
1549		    strmno, strmseq,
1550		    chunk_flags,
1551		    dmbuf);
1552		if (control == NULL) {
1553			goto failed_express_del;
1554		}
1555		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1556		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1557			asoc->highest_tsn_inside_nr_map = tsn;
1558		}
1559		sctp_add_to_readq(stcb->sctp_ep, stcb,
1560		    control, &stcb->sctp_socket->so_rcv,
1561		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1562
1563		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1564			/* for ordered, bump what we delivered */
1565			asoc->strmin[strmno].last_sequence_delivered++;
1566		}
1567		SCTP_STAT_INCR(sctps_recvexpress);
1568		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1569			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1570			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1571		}
1572		control = NULL;
1573
1574		goto finish_express_del;
1575	}
1576failed_express_del:
1577	/* If we reach here this is a new chunk */
1578	chk = NULL;
1579	control = NULL;
1580	/* Express for fragmented delivery? */
1581	if ((asoc->fragmented_delivery_inprogress) &&
1582	    (stcb->asoc.control_pdapi) &&
1583	    (asoc->str_of_pdapi == strmno) &&
1584	    (asoc->ssn_of_pdapi == strmseq)
1585	    ) {
1586		control = stcb->asoc.control_pdapi;
1587		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1588			/* Can't be another first? */
1589			goto failed_pdapi_express_del;
1590		}
1591		if (tsn == (control->sinfo_tsn + 1)) {
1592			/* Yep, we can add it on */
1593			int end = 0;
1594
1595			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1596				end = 1;
1597			}
1598			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1599			    tsn,
1600			    &stcb->sctp_socket->so_rcv)) {
1601				SCTP_PRINTF("Append fails end:%d\n", end);
1602				goto failed_pdapi_express_del;
1603			}
1604			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1605			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1606				asoc->highest_tsn_inside_nr_map = tsn;
1607			}
1608			SCTP_STAT_INCR(sctps_recvexpressm);
1609			asoc->tsn_last_delivered = tsn;
1610			asoc->fragment_flags = chunk_flags;
1611			asoc->tsn_of_pdapi_last_delivered = tsn;
1612			asoc->last_flags_delivered = chunk_flags;
1613			asoc->last_strm_seq_delivered = strmseq;
1614			asoc->last_strm_no_delivered = strmno;
1615			if (end) {
1616				/* clean up the flags and such */
1617				asoc->fragmented_delivery_inprogress = 0;
1618				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1619					asoc->strmin[strmno].last_sequence_delivered++;
1620				}
1621				stcb->asoc.control_pdapi = NULL;
1622				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1623					/*
1624					 * There could be another message
1625					 * ready
1626					 */
1627					need_reasm_check = 1;
1628				}
1629			}
1630			control = NULL;
1631			goto finish_express_del;
1632		}
1633	}
1634failed_pdapi_express_del:
1635	control = NULL;
1636	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1637		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1638		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1639			asoc->highest_tsn_inside_nr_map = tsn;
1640		}
1641	} else {
1642		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1643		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1644			asoc->highest_tsn_inside_map = tsn;
1645		}
1646	}
1647	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1648		sctp_alloc_a_chunk(stcb, chk);
1649		if (chk == NULL) {
1650			/* No memory so we drop the chunk */
1651			SCTP_STAT_INCR(sctps_nomem);
1652			if (last_chunk == 0) {
1653				/* we copied it, free the copy */
1654				sctp_m_freem(dmbuf);
1655			}
1656			return (0);
1657		}
1658		chk->rec.data.TSN_seq = tsn;
1659		chk->no_fr_allowed = 0;
1660		chk->rec.data.stream_seq = strmseq;
1661		chk->rec.data.stream_number = strmno;
1662		chk->rec.data.payloadtype = protocol_id;
1663		chk->rec.data.context = stcb->asoc.context;
1664		chk->rec.data.doing_fast_retransmit = 0;
1665		chk->rec.data.rcv_flags = chunk_flags;
1666		chk->asoc = asoc;
1667		chk->send_size = the_len;
1668		chk->whoTo = net;
1669		atomic_add_int(&net->ref_count, 1);
1670		chk->data = dmbuf;
1671	} else {
1672		sctp_alloc_a_readq(stcb, control);
1673		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1674		    protocol_id,
1675		    strmno, strmseq,
1676		    chunk_flags,
1677		    dmbuf);
1678		if (control == NULL) {
1679			/* No memory so we drop the chunk */
1680			SCTP_STAT_INCR(sctps_nomem);
1681			if (last_chunk == 0) {
1682				/* we copied it, free the copy */
1683				sctp_m_freem(dmbuf);
1684			}
1685			return (0);
1686		}
1687		control->length = the_len;
1688	}
1689
1690	/* Mark it as received */
1691	/* Now queue it where it belongs */
1692	if (control != NULL) {
1693		/* First a sanity check */
1694		if (asoc->fragmented_delivery_inprogress) {
1695			/*
1696			 * Ok, we have a fragmented delivery in progress. If
1697			 * this (complete) chunk is the next to deliver OR,
1698			 * in our view, belongs on the reassembly queue, then
1699			 * the peer is evil or broken.
1700			 */
1701			uint32_t estimate_tsn;
1702
1703			estimate_tsn = asoc->tsn_last_delivered + 1;
1704			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1705			    (estimate_tsn == control->sinfo_tsn)) {
1706				/* Evil/Broke peer */
1707				sctp_m_freem(control->data);
1708				control->data = NULL;
1709				if (control->whoFrom) {
1710					sctp_free_remote_addr(control->whoFrom);
1711					control->whoFrom = NULL;
1712				}
1713				sctp_free_a_readq(stcb, control);
1714				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1715				    tsn, strmno, strmseq);
1716				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1717				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1718				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1719				*abort_flag = 1;
1720				return (0);
1721			} else {
1722				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1723					sctp_m_freem(control->data);
1724					control->data = NULL;
1725					if (control->whoFrom) {
1726						sctp_free_remote_addr(control->whoFrom);
1727						control->whoFrom = NULL;
1728					}
1729					sctp_free_a_readq(stcb, control);
1730					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1731					    tsn, strmno, strmseq);
1732					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1733					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1734					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1735					*abort_flag = 1;
1736					return (0);
1737				}
1738			}
1739		} else {
1740			/* No PDAPI running */
1741			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1742				/*
1743				 * Reassembly queue is NOT empty; validate
1744				 * that this tsn does not need to be on the
1745				 * reassembly queue. If it does, then our peer
1746				 * is broken or evil.
1747				 */
1748				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1749					sctp_m_freem(control->data);
1750					control->data = NULL;
1751					if (control->whoFrom) {
1752						sctp_free_remote_addr(control->whoFrom);
1753						control->whoFrom = NULL;
1754					}
1755					sctp_free_a_readq(stcb, control);
1756					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1757					    tsn, strmno, strmseq);
1758					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1759					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1760					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1761					*abort_flag = 1;
1762					return (0);
1763				}
1764			}
1765		}
1766		/* ok, if we reach here we have passed the sanity checks */
1767		if (chunk_flags & SCTP_DATA_UNORDERED) {
1768			/* queue directly into socket buffer */
1769			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1770			sctp_add_to_readq(stcb->sctp_ep, stcb,
1771			    control,
1772			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1773		} else {
1774			/*
1775			 * Special check for when streams are resetting. We
1776			 * could be smarter about this and check the actual
1777			 * stream to see if it is not being reset.. that way
1778			 * we would not create a HOLB between streams being
1779			 * reset and those not being reset.
1780			 *
1781			 * We take complete messages that have a stream reset
1782			 * intervening (aka the TSN is after where our
1783			 * cum-ack needs to be) off and put them on the
1784			 * pending_reply_queue. The reassembly ones we do
1785			 * not have to worry about, since they are all sorted
1786			 * and processed in TSN order. It is only the
1787			 * singletons I must worry about.
1788			 */
1789			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1790			    SCTP_TSN_GT(tsn, liste->tsn)) {
1791				/*
1792				 * yep it's past where we need to reset... go
1793				 * ahead and queue it.
1794				 */
1795				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1796					/* first one on */
1797					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1798				} else {
1799					struct sctp_queued_to_read *ctlOn,
1800					                   *nctlOn;
1801					unsigned char inserted = 0;
1802
1803					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1804						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1805							continue;
1806						} else {
1807							/* found it */
1808							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1809							inserted = 1;
1810							break;
1811						}
1812					}
1813					if (inserted == 0) {
1814						/*
1815						 * not inserted before any
1816						 * existing entry, so it
1817						 * must go at the end.
1818						 */
1819						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1820					}
1821				}
1822			} else {
1823				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1824				if (*abort_flag) {
1825					return (0);
1826				}
1827			}
1828		}
1829	} else {
1830		/* Into the re-assembly queue */
1831		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1832		if (*abort_flag) {
1833			/*
1834			 * the assoc is now gone and chk was put onto the
1835			 * reasm queue, which has all been freed.
1836			 */
1837			*m = NULL;
1838			return (0);
1839		}
1840	}
1841finish_express_del:
1842	if (tsn == (asoc->cumulative_tsn + 1)) {
1843		/* Update cum-ack */
1844		asoc->cumulative_tsn = tsn;
1845	}
1846	if (last_chunk) {
1847		*m = NULL;
1848	}
1849	if (ordered) {
1850		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1851	} else {
1852		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1853	}
1854	SCTP_STAT_INCR(sctps_recvdata);
1855	/* Set it present please */
1856	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1857		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1858	}
1859	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1860		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1861		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1862	}
1863	/* check the special flag for stream resets */
1864	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1865	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1866		/*
1867		 * We have finished working through the backlogged TSNs; now
1868		 * it is time to reset streams. 1: call the reset function.
1869		 * 2: free the pending_reply space. 3: distribute any chunks
1870		 * in the pending_reply_queue.
1871		 */
1872		struct sctp_queued_to_read *ctl, *nctl;
1873
1874		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1875		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1876		SCTP_FREE(liste, SCTP_M_STRESET);
1877		/* sa_ignore FREED_MEMORY */
1878		liste = TAILQ_FIRST(&asoc->resetHead);
1879		if (TAILQ_EMPTY(&asoc->resetHead)) {
1880			/* All can be removed */
1881			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1882				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1883				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1884				if (*abort_flag) {
1885					return (0);
1886				}
1887			}
1888		} else {
1889			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1890				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1891					break;
1892				}
1893				/*
1894				 * if ctl->sinfo_tsn is <= liste->tsn we can
1895				 * process it which is the NOT of
1896				 * ctl->sinfo_tsn > liste->tsn
1897				 */
1898				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1899				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1900				if (*abort_flag) {
1901					return (0);
1902				}
1903			}
1904		}
1905		/*
1906		 * Now service reassembly to pick up anything that has been
1907		 * held on the reassembly queue.
1908		 */
1909		sctp_deliver_reasm_check(stcb, asoc);
1910		need_reasm_check = 0;
1911	}
1912	if (need_reasm_check) {
1913		/* Another one waits ? */
1914		sctp_deliver_reasm_check(stcb, asoc);
1915	}
1916	return (1);
1917}
1918
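/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1 bits in val,
 * counting up from the least significant bit - in other words, the bit
 * index of the first 0 bit. For example, sctp_map_lookup_tab[0x07] is 3
 * and sctp_map_lookup_tab[0x0f] is 4. A byte of 0xff never consults the
 * table; the slide loop below just adds 8 and moves to the next byte.
 */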
1919int8_t sctp_map_lookup_tab[256] = {
1920	0, 1, 0, 2, 0, 1, 0, 3,
1921	0, 1, 0, 2, 0, 1, 0, 4,
1922	0, 1, 0, 2, 0, 1, 0, 3,
1923	0, 1, 0, 2, 0, 1, 0, 5,
1924	0, 1, 0, 2, 0, 1, 0, 3,
1925	0, 1, 0, 2, 0, 1, 0, 4,
1926	0, 1, 0, 2, 0, 1, 0, 3,
1927	0, 1, 0, 2, 0, 1, 0, 6,
1928	0, 1, 0, 2, 0, 1, 0, 3,
1929	0, 1, 0, 2, 0, 1, 0, 4,
1930	0, 1, 0, 2, 0, 1, 0, 3,
1931	0, 1, 0, 2, 0, 1, 0, 5,
1932	0, 1, 0, 2, 0, 1, 0, 3,
1933	0, 1, 0, 2, 0, 1, 0, 4,
1934	0, 1, 0, 2, 0, 1, 0, 3,
1935	0, 1, 0, 2, 0, 1, 0, 7,
1936	0, 1, 0, 2, 0, 1, 0, 3,
1937	0, 1, 0, 2, 0, 1, 0, 4,
1938	0, 1, 0, 2, 0, 1, 0, 3,
1939	0, 1, 0, 2, 0, 1, 0, 5,
1940	0, 1, 0, 2, 0, 1, 0, 3,
1941	0, 1, 0, 2, 0, 1, 0, 4,
1942	0, 1, 0, 2, 0, 1, 0, 3,
1943	0, 1, 0, 2, 0, 1, 0, 6,
1944	0, 1, 0, 2, 0, 1, 0, 3,
1945	0, 1, 0, 2, 0, 1, 0, 4,
1946	0, 1, 0, 2, 0, 1, 0, 3,
1947	0, 1, 0, 2, 0, 1, 0, 5,
1948	0, 1, 0, 2, 0, 1, 0, 3,
1949	0, 1, 0, 2, 0, 1, 0, 4,
1950	0, 1, 0, 2, 0, 1, 0, 3,
1951	0, 1, 0, 2, 0, 1, 0, 8
1952};
1953
1954
1955void
1956sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1957{
1958	/*
1959	 * Now we also need to check the mapping array in a couple of ways.
1960	 * 1) Did we move the cum-ack point?
1961	 *
1962	 * When you first glance at this you might think that all entries that
1963	 * make up the position of the cum-ack would be in the nr-mapping
1964	 * array only.. i.e. things up to the cum-ack are always
1965	 * deliverable. That's true with one exception: for a fragmented
1966	 * message we may not deliver the data until some threshold (or all
1967	 * of it) is in place. So we must OR the nr_mapping_array and
1967	 * of it) is in place. So we must OR the nr_mapping_array and
1968	 * mapping_array to get a true picture of the cum-ack.
1969	 */
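	/*
	 * A quick worked example of the OR below: with
	 * mapping_array_base_tsn = B, nr_mapping_array[0] = 0x3f and
	 * mapping_array[0] = 0x40, the OR'd byte is 0x7f, i.e. seven
	 * contiguous TSNs starting at B are present, so the scan below
	 * advances the cum-ack to B + 6.
	 */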
1970	struct sctp_association *asoc;
1971	int at;
1972	uint8_t val;
1973	int slide_from, slide_end, lgap, distance;
1974	uint32_t old_cumack, old_base, old_highest, highest_tsn;
1975
1976	asoc = &stcb->asoc;
1977
1978	old_cumack = asoc->cumulative_tsn;
1979	old_base = asoc->mapping_array_base_tsn;
1980	old_highest = asoc->highest_tsn_inside_map;
1981	/*
1982	 * We could probably improve this a small bit by calculating the
1983	 * offset of the current cum-ack as the starting point.
1984	 */
1985	at = 0;
1986	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
1987		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
1988		if (val == 0xff) {
1989			at += 8;
1990		} else {
1991			/* there is a 0 bit */
1992			at += sctp_map_lookup_tab[val];
1993			break;
1994		}
1995	}
1996	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
1997
1998	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
1999	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2000#ifdef INVARIANTS
2001		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2002		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2003#else
2004		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2005		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2006		sctp_print_mapping_array(asoc);
2007		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2008			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2009		}
2010		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2011		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2012#endif
2013	}
2014	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2015		highest_tsn = asoc->highest_tsn_inside_nr_map;
2016	} else {
2017		highest_tsn = asoc->highest_tsn_inside_map;
2018	}
2019	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2020		/* The complete array was completed by a single FR */
2021		/* highest becomes the cum-ack */
2022		int clr;
2023
2024#ifdef INVARIANTS
2025		unsigned int i;
2026
2027#endif
2028
2029		/* clear the array */
2030		clr = ((at + 7) >> 3);
2031		if (clr > asoc->mapping_array_size) {
2032			clr = asoc->mapping_array_size;
2033		}
2034		memset(asoc->mapping_array, 0, clr);
2035		memset(asoc->nr_mapping_array, 0, clr);
2036#ifdef INVARIANTS
2037		for (i = 0; i < asoc->mapping_array_size; i++) {
2038			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2039				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2040				sctp_print_mapping_array(asoc);
2041			}
2042		}
2043#endif
2044		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2045		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2046	} else if (at >= 8) {
2047		/* we can slide the mapping array down */
2048		/* slide_from holds where we hit the first NON 0xff byte */
2049
2050		/*
2051		 * now calculate the ceiling of the move using our highest
2052		 * TSN value
2053		 */
2054		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2055		slide_end = (lgap >> 3);
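		/*
		 * For example: if the first non-0xff byte was at index 2
		 * (slide_from = 2) and highest_tsn lies 25 TSNs above the
		 * base (lgap = 25), then slide_end = 3, distance = 2, and
		 * bytes 2..3 are copied down to 0..1 below, after which
		 * mapping_array_base_tsn advances by slide_from * 8 = 16.
		 */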
2056		if (slide_end < slide_from) {
2057			sctp_print_mapping_array(asoc);
2058#ifdef INVARIANTS
2059			panic("impossible slide");
2060#else
2061			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2062			    lgap, slide_end, slide_from, at);
2063			return;
2064#endif
2065		}
2066		if (slide_end > asoc->mapping_array_size) {
2067#ifdef INVARIANTS
2068			panic("would overrun buffer");
2069#else
2070			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2071			    asoc->mapping_array_size, slide_end);
2072			slide_end = asoc->mapping_array_size;
2073#endif
2074		}
2075		distance = (slide_end - slide_from) + 1;
2076		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2077			sctp_log_map(old_base, old_cumack, old_highest,
2078			    SCTP_MAP_PREPARE_SLIDE);
2079			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2080			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2081		}
2082		if (distance + slide_from > asoc->mapping_array_size ||
2083		    distance < 0) {
2084			/*
2085			 * Here we do NOT slide the array forward, so that
2086			 * hopefully when more data comes in to fill it up
2087			 * we will be able to slide it forward. Really, I
2088			 * don't think this should happen :-0
2089			 */
2090
2091			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2092				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2093				    (uint32_t) asoc->mapping_array_size,
2094				    SCTP_MAP_SLIDE_NONE);
2095			}
2096		} else {
2097			int ii;
2098
2099			for (ii = 0; ii < distance; ii++) {
2100				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2101				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2102
2103			}
2104			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2105				asoc->mapping_array[ii] = 0;
2106				asoc->nr_mapping_array[ii] = 0;
2107			}
2108			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2109				asoc->highest_tsn_inside_map += (slide_from << 3);
2110			}
2111			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2112				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2113			}
2114			asoc->mapping_array_base_tsn += (slide_from << 3);
2115			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2116				sctp_log_map(asoc->mapping_array_base_tsn,
2117				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2118				    SCTP_MAP_SLIDE_RESULT);
2119			}
2120		}
2121	}
2122}
2123
2124void
2125sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2126{
2127	struct sctp_association *asoc;
2128	uint32_t highest_tsn;
2129
2130	asoc = &stcb->asoc;
2131	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2132		highest_tsn = asoc->highest_tsn_inside_nr_map;
2133	} else {
2134		highest_tsn = asoc->highest_tsn_inside_map;
2135	}
2136
2137	/*
2138	 * Now we need to see if we need to queue a sack or just start the
2139	 * timer (if allowed).
2140	 */
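	/*
	 * In short: in SHUTDOWN-SENT we always SACK immediately (bundled
	 * with a SHUTDOWN). Otherwise we SACK immediately when one was
	 * explicitly requested, a gap exists or just closed, duplicates
	 * were reported, delayed SACKs are disabled, or the packet-count
	 * limit was hit - except that with CMT's DAC algorithm some of
	 * these gap-triggered SACKs are delayed anyway. In all other
	 * cases we just (re)start the delayed-SACK timer.
	 */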
2141	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2142		/*
2143		 * Ok, special case: in the SHUTDOWN-SENT state we make
2144		 * sure the SACK timer is off and instead send a SHUTDOWN
2145		 * and a SACK.
2146		 */
2147		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2148			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2149			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2150		}
2151		sctp_send_shutdown(stcb,
2152		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2153		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2154	} else {
2155		int is_a_gap;
2156
2157		/* is there a gap now ? */
2158		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2159
2160		/*
2161		 * CMT DAC algorithm: increase number of packets received
2162		 * since last ack
2163		 */
2164		stcb->asoc.cmt_dac_pkts_rcvd++;
2165
2166		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2167							 * SACK */
2168		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2169							 * longer is one */
2170		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2171		    (is_a_gap) ||	/* is still a gap */
2172		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2173		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2174		    ) {
2175
2176			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2177			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2178			    (stcb->asoc.send_sack == 0) &&
2179			    (stcb->asoc.numduptsns == 0) &&
2180			    (stcb->asoc.delayed_ack) &&
2181			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2182
2183				/*
2184				 * CMT DAC algorithm: With CMT, delay acks
2185				 * even in the face of reordering.
2186				 *
2187				 * Therefore, acks that do not have to be
2188				 * sent because of the above reasons will be
2189				 * delayed. That is, acks that would have
2190				 * been sent due to gap reports will be
2191				 * delayed with DAC. Start the delayed ack
2192				 * timer.
2193				 */
2194				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2195				    stcb->sctp_ep, stcb, NULL);
2196			} else {
2197				/*
2198				 * Ok we must build a SACK since the timer
2199				 * is pending, we got our first packet OR
2200				 * there are gaps or duplicates.
2201				 */
2202				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2203				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2204			}
2205		} else {
2206			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2207				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2208				    stcb->sctp_ep, stcb, NULL);
2209			}
2210		}
2211	}
2212}
2213
2214void
2215sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2216{
2217	struct sctp_tmit_chunk *chk;
2218	uint32_t tsize, pd_point;
2219	uint16_t nxt_todel;
2220
2221	if (asoc->fragmented_delivery_inprogress) {
2222		sctp_service_reassembly(stcb, asoc);
2223	}
2224	/* Can we proceed further, i.e. the PD-API is complete */
2225	if (asoc->fragmented_delivery_inprogress) {
2226		/* no */
2227		return;
2228	}
2229	/*
2230	 * Now is there some other chunk I can deliver from the reassembly
2231	 * queue.
2232	 */
2233doit_again:
2234	chk = TAILQ_FIRST(&asoc->reasmqueue);
2235	if (chk == NULL) {
2236		asoc->size_on_reasm_queue = 0;
2237		asoc->cnt_on_reasm_queue = 0;
2238		return;
2239	}
2240	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2241	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2242	    ((nxt_todel == chk->rec.data.stream_seq) ||
2243	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2244		/*
2245		 * Yep, the first one is here. We set up to start reception
2246		 * by backing down the TSN, just in case we can't deliver.
2247		 */
2248
2249		/*
2250		 * Before we start, though, either all of the message should
2251		 * be here, or at least a partial-delivery-point's worth of
2252		 * it should be queued and deliverable.
2253		 */
2254		if (stcb->sctp_socket) {
2255			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2256			    stcb->sctp_ep->partial_delivery_point);
2257		} else {
2258			pd_point = stcb->sctp_ep->partial_delivery_point;
2259		}
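		/*
		 * pd_point is how much of a still-incomplete message must
		 * already be queued before we begin a partial delivery:
		 * the endpoint's partial_delivery_point, capped at the
		 * socket receive buffer limit shifted right by
		 * SCTP_PARTIAL_DELIVERY_SHIFT.
		 */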
2260		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2261			asoc->fragmented_delivery_inprogress = 1;
2262			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2263			asoc->str_of_pdapi = chk->rec.data.stream_number;
2264			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2265			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2266			asoc->fragment_flags = chk->rec.data.rcv_flags;
2267			sctp_service_reassembly(stcb, asoc);
2268			if (asoc->fragmented_delivery_inprogress == 0) {
2269				goto doit_again;
2270			}
2271		}
2272	}
2273}
2274
2275int
2276sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2277    struct sockaddr *src, struct sockaddr *dst,
2278    struct sctphdr *sh, struct sctp_inpcb *inp,
2279    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2280    uint8_t use_mflowid, uint32_t mflowid,
2281    uint32_t vrf_id, uint16_t port)
2282{
2283	struct sctp_data_chunk *ch, chunk_buf;
2284	struct sctp_association *asoc;
2285	int num_chunks = 0;	/* number of data chunks processed */
2286	int stop_proc = 0;
2287	int chk_length, break_flag, last_chunk;
2288	int abort_flag = 0, was_a_gap;
2289	struct mbuf *m;
2290	uint32_t highest_tsn;
2291
2292	/* set the rwnd */
2293	sctp_set_rwnd(stcb, &stcb->asoc);
2294
2295	m = *mm;
2296	SCTP_TCB_LOCK_ASSERT(stcb);
2297	asoc = &stcb->asoc;
2298	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2299		highest_tsn = asoc->highest_tsn_inside_nr_map;
2300	} else {
2301		highest_tsn = asoc->highest_tsn_inside_map;
2302	}
2303	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2304	/*
2305	 * setup where we got the last DATA packet from for any SACK that
2306	 * may need to go out. Don't bump the net. This is done ONLY when a
2307	 * chunk is assigned.
2308	 */
2309	asoc->last_data_chunk_from = net;
2310
2311	/*-
2312	 * Now before we proceed we must figure out if this is a wasted
2313	 * cluster... i.e. it is a small packet sent in and yet the driver
2314	 * underneath allocated a full cluster for it. If so we must copy it
2315	 * to a smaller mbuf and free up the cluster mbuf. This will help
2316	 * with cluster starvation. Note for __Panda__ we don't do this
2317	 * since it has clusters all the way down to 64 bytes.
2318	 */
2319	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2320		/* we only handle mbufs that are singletons.. not chains */
2321		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2322		if (m) {
2323			/* ok lets see if we can copy the data up */
2324			/* ok, let's see if we can copy the data up */
2325
2326			/* get the pointers and copy */
2327			to = mtod(m, caddr_t *);
2328			from = mtod((*mm), caddr_t *);
2329			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2330			/* copy the length and free up the old */
2331			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2332			sctp_m_freem(*mm);
2333			/* success, copy it back */
2334			*mm = m;
2335		} else {
2336			/* We are in trouble in the mbuf world .. yikes */
2337			m = *mm;
2338		}
2339	}
2340	/* get pointer to the first chunk header */
2341	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2342	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2343	if (ch == NULL) {
2344		return (1);
2345	}
2346	/*
2347	 * process all DATA chunks...
2348	 */
2349	*high_tsn = asoc->cumulative_tsn;
2350	break_flag = 0;
2351	asoc->data_pkts_seen++;
2352	while (stop_proc == 0) {
2353		/* validate chunk length */
2354		chk_length = ntohs(ch->ch.chunk_length);
2355		if (length - *offset < chk_length) {
2356			/* all done, mutilated chunk */
2357			stop_proc = 1;
2358			continue;
2359		}
2360		if (ch->ch.chunk_type == SCTP_DATA) {
2361			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2362				/*
2363				 * Need to send an abort since we had an
2364				 * invalid data chunk.
2365				 */
2366				struct mbuf *op_err;
2367				char msg[SCTP_DIAG_INFO_LEN];
2368
2369				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2370				    chk_length);
2371				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2372				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2373				sctp_abort_association(inp, stcb, m, iphlen,
2374				    src, dst, sh, op_err,
2375				    use_mflowid, mflowid,
2376				    vrf_id, port);
2377				return (2);
2378			}
2379			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2380				/*
2381				 * Need to send an abort since we had an
2382				 * empty data chunk.
2383				 */
2384				struct mbuf *op_err;
2385
2386				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2387				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2388				sctp_abort_association(inp, stcb, m, iphlen,
2389				    src, dst, sh, op_err,
2390				    use_mflowid, mflowid,
2391				    vrf_id, port);
2392				return (2);
2393			}
2394#ifdef SCTP_AUDITING_ENABLED
2395			sctp_audit_log(0xB1, 0);
2396#endif
2397			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2398				last_chunk = 1;
2399			} else {
2400				last_chunk = 0;
2401			}
2402			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2403			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2404			    last_chunk)) {
2405				num_chunks++;
2406			}
2407			if (abort_flag)
2408				return (2);
2409
2410			if (break_flag) {
2411				/*
2412				 * Set because we ran out of rwnd space and
2413				 * have no drop report space left.
2414				 */
2415				stop_proc = 1;
2416				continue;
2417			}
2418		} else {
2419			/* not a data chunk in the data region */
2420			switch (ch->ch.chunk_type) {
2421			case SCTP_INITIATION:
2422			case SCTP_INITIATION_ACK:
2423			case SCTP_SELECTIVE_ACK:
2424			case SCTP_NR_SELECTIVE_ACK:
2425			case SCTP_HEARTBEAT_REQUEST:
2426			case SCTP_HEARTBEAT_ACK:
2427			case SCTP_ABORT_ASSOCIATION:
2428			case SCTP_SHUTDOWN:
2429			case SCTP_SHUTDOWN_ACK:
2430			case SCTP_OPERATION_ERROR:
2431			case SCTP_COOKIE_ECHO:
2432			case SCTP_COOKIE_ACK:
2433			case SCTP_ECN_ECHO:
2434			case SCTP_ECN_CWR:
2435			case SCTP_SHUTDOWN_COMPLETE:
2436			case SCTP_AUTHENTICATION:
2437			case SCTP_ASCONF_ACK:
2438			case SCTP_PACKET_DROPPED:
2439			case SCTP_STREAM_RESET:
2440			case SCTP_FORWARD_CUM_TSN:
2441			case SCTP_ASCONF:
2442				/*
2443				 * Now, what do we do with KNOWN chunks that
2444				 * are NOT in the right place?
2445				 *
2446				 * For now, I do nothing but ignore them. We
2447				 * may later want to add sysctl stuff to
2448				 * switch out and do either an ABORT() or
2449				 * possibly process them.
2450				 */
2451				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2452					struct mbuf *op_err;
2453
2454					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2455					sctp_abort_association(inp, stcb,
2456					    m, iphlen,
2457					    src, dst,
2458					    sh, op_err,
2459					    use_mflowid, mflowid,
2460					    vrf_id, port);
2461					return (2);
2462				}
2463				break;
2464			default:
2465				/* unknown chunk type, use bit rules */
2466				if (ch->ch.chunk_type & 0x40) {
2467					/* Add an error report to the queue */
2468					struct mbuf *merr;
2469					struct sctp_paramhdr *phd;
2470
2471					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2472					if (merr) {
2473						phd = mtod(merr, struct sctp_paramhdr *);
2474						/*
2475						 * We cheat and use the param
2476						 * type since we did not
2477						 * bother to define an error
2478						 * cause struct. They are
2479						 * the same basic format
2480						 * with different names.
2481						 */
2482						phd->param_type =
2483						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2484						phd->param_length =
2485						    htons(chk_length + sizeof(*phd));
2486						SCTP_BUF_LEN(merr) = sizeof(*phd);
2487						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2488						if (SCTP_BUF_NEXT(merr)) {
2489							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2490								sctp_m_freem(merr);
2491							} else {
2492								sctp_queue_op_err(stcb, merr);
2493							}
2494						} else {
2495							sctp_m_freem(merr);
2496						}
2497					}
2498				}
2499				if ((ch->ch.chunk_type & 0x80) == 0) {
2500					/* discard the rest of this packet */
2501					stop_proc = 1;
2502				}	/* else skip this bad chunk and
2503					 * continue... */
2504				break;
2505			}	/* switch of chunk type */
2506		}
2507		*offset += SCTP_SIZE32(chk_length);
2508		if ((*offset >= length) || stop_proc) {
2509			/* no more data left in the mbuf chain */
2510			stop_proc = 1;
2511			continue;
2512		}
2513		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2514		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2515		if (ch == NULL) {
2516			*offset = length;
2517			stop_proc = 1;
2518			continue;
2519		}
2520	}
2521	if (break_flag) {
2522		/*
2523		 * we need to report rwnd overrun drops.
2524		 */
2525		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2526	}
2527	if (num_chunks) {
2528		/*
2529		 * We got data; update the time for auto-close and
2530		 * give the peer credit for being alive.
2531		 */
2532		SCTP_STAT_INCR(sctps_recvpktwithdata);
2533		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2534			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2535			    stcb->asoc.overall_error_count,
2536			    0,
2537			    SCTP_FROM_SCTP_INDATA,
2538			    __LINE__);
2539		}
2540		stcb->asoc.overall_error_count = 0;
2541		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2542	}
2543	/* now service all of the reassm queue if needed */
2544	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2545		sctp_service_queues(stcb, asoc);
2546
2547	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2548		/* Assure that we ack right away */
2549		stcb->asoc.send_sack = 1;
2550	}
2551	/* Start a sack timer or QUEUE a SACK for sending */
2552	sctp_sack_check(stcb, was_a_gap);
2553	return (0);
2554}
2555
2556static int
2557sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2558    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2559    int *num_frs,
2560    uint32_t * biggest_newly_acked_tsn,
2561    uint32_t * this_sack_lowest_newack,
2562    int *rto_ok)
2563{
2564	struct sctp_tmit_chunk *tp1;
2565	unsigned int theTSN;
2566	int j, wake_him = 0, circled = 0;
2567
2568	/* Recover the tp1 we last saw */
2569	tp1 = *p_tp1;
2570	if (tp1 == NULL) {
2571		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2572	}
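	/*
	 * Gap ack block boundaries are offsets from the cumulative TSN
	 * ack (last_tsn here): a block of [2, 4] with last_tsn = 1000
	 * acknowledges TSNs 1002 through 1004.
	 */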
2573	for (j = frag_strt; j <= frag_end; j++) {
2574		theTSN = j + last_tsn;
2575		while (tp1) {
2576			if (tp1->rec.data.doing_fast_retransmit)
2577				(*num_frs) += 1;
2578
2579			/*-
2580			 * CMT: CUCv2 algorithm. For each TSN being
2581			 * processed from the sent queue, track the
2582			 * next expected pseudo-cumack, or
2583			 * rtx_pseudo_cumack, if required. Separate
2584			 * cumack trackers for first transmissions,
2585			 * and retransmissions.
2586			 */
2587			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2588			    (tp1->snd_count == 1)) {
2589				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2590				tp1->whoTo->find_pseudo_cumack = 0;
2591			}
2592			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2593			    (tp1->snd_count > 1)) {
2594				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2595				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2596			}
2597			if (tp1->rec.data.TSN_seq == theTSN) {
2598				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2599					/*-
2600					 * must be held until
2601					 * cum-ack passes
2602					 */
2603					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2604						/*-
2605						 * If it is less than RESEND, it is
2606						 * now no-longer in flight.
2607						 * now no longer in flight.
2608						 * via previous Gap Ack Blocks...
2609						 * i.e. ACKED or RESEND.
2610						 */
2611						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2612						    *biggest_newly_acked_tsn)) {
2613							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2614						}
2615						/*-
2616						 * CMT: SFR algo (and HTNA) - set
2617						 * saw_newack to 1 for dest being
2618						 * newly acked. update
2619						 * this_sack_highest_newack if
2620						 * appropriate.
2621						 */
2622						if (tp1->rec.data.chunk_was_revoked == 0)
2623							tp1->whoTo->saw_newack = 1;
2624
2625						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2626						    tp1->whoTo->this_sack_highest_newack)) {
2627							tp1->whoTo->this_sack_highest_newack =
2628							    tp1->rec.data.TSN_seq;
2629						}
2630						/*-
2631						 * CMT DAC algo: also update
2632						 * this_sack_lowest_newack
2633						 */
2634						if (*this_sack_lowest_newack == 0) {
2635							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2636								sctp_log_sack(*this_sack_lowest_newack,
2637								    last_tsn,
2638								    tp1->rec.data.TSN_seq,
2639								    0,
2640								    0,
2641								    SCTP_LOG_TSN_ACKED);
2642							}
2643							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2644						}
2645						/*-
2646						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2647						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2648						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2649						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2650						 * Separate pseudo_cumack trackers for first transmissions and
2651						 * retransmissions.
2652						 */
2653						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2654							if (tp1->rec.data.chunk_was_revoked == 0) {
2655								tp1->whoTo->new_pseudo_cumack = 1;
2656							}
2657							tp1->whoTo->find_pseudo_cumack = 1;
2658						}
2659						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2660							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2661						}
2662						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2663							if (tp1->rec.data.chunk_was_revoked == 0) {
2664								tp1->whoTo->new_pseudo_cumack = 1;
2665							}
2666							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2667						}
2668						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2669							sctp_log_sack(*biggest_newly_acked_tsn,
2670							    last_tsn,
2671							    tp1->rec.data.TSN_seq,
2672							    frag_strt,
2673							    frag_end,
2674							    SCTP_LOG_TSN_ACKED);
2675						}
2676						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2677							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2678							    tp1->whoTo->flight_size,
2679							    tp1->book_size,
2680							    (uintptr_t) tp1->whoTo,
2681							    tp1->rec.data.TSN_seq);
2682						}
2683						sctp_flight_size_decrease(tp1);
2684						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2685							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2686							    tp1);
2687						}
2688						sctp_total_flight_decrease(stcb, tp1);
2689
2690						tp1->whoTo->net_ack += tp1->send_size;
2691						if (tp1->snd_count < 2) {
2692							/*-
2693							 * True non-retransmitted chunk
2694							 */
2695							tp1->whoTo->net_ack2 += tp1->send_size;
2696
2697							/*-
2698							 * update RTO too ?
2699							 */
2700							if (tp1->do_rtt) {
2701								if (*rto_ok) {
2702									tp1->whoTo->RTO =
2703									    sctp_calculate_rto(stcb,
2704									    &stcb->asoc,
2705									    tp1->whoTo,
2706									    &tp1->sent_rcv_time,
2707									    sctp_align_safe_nocopy,
2708									    SCTP_RTT_FROM_DATA);
2709									*rto_ok = 0;
2710								}
2711								if (tp1->whoTo->rto_needed == 0) {
2712									tp1->whoTo->rto_needed = 1;
2713								}
2714								tp1->do_rtt = 0;
2715							}
2716						}
2717					}
2718					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2719						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2720						    stcb->asoc.this_sack_highest_gap)) {
2721							stcb->asoc.this_sack_highest_gap =
2722							    tp1->rec.data.TSN_seq;
2723						}
2724						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2725							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2726#ifdef SCTP_AUDITING_ENABLED
2727							sctp_audit_log(0xB2,
2728							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2729#endif
2730						}
2731					}
2732					/*-
2733					 * All chunks NOT UNSENT fall through here and are marked
2734					 * (leave PR-SCTP ones that are to skip alone though)
2735					 */
2736					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2737					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2738						tp1->sent = SCTP_DATAGRAM_MARKED;
2739					}
2740					if (tp1->rec.data.chunk_was_revoked) {
2741						/* deflate the cwnd */
2742						tp1->whoTo->cwnd -= tp1->book_size;
2743						tp1->rec.data.chunk_was_revoked = 0;
2744					}
2745					/* NR Sack code here */
2746					if (nr_sacking &&
2747					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2748						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2749							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2750#ifdef INVARIANTS
2751						} else {
2752							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2753#endif
2754						}
2755						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2756						if (tp1->data) {
2757							/*
2758							 * sa_ignore
2759							 * NO_NULL_CHK
2760							 */
2761							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2762							sctp_m_freem(tp1->data);
2763							tp1->data = NULL;
2764						}
2765						wake_him++;
2766					}
2767				}
2768				break;
2769			}	/* if (tp1->TSN_seq == theTSN) */
2770			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2771				break;
2772			}
2773			tp1 = TAILQ_NEXT(tp1, sctp_next);
2774			if ((tp1 == NULL) && (circled == 0)) {
2775				circled++;
2776				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2777			}
2778		}		/* end while (tp1) */
2779		if (tp1 == NULL) {
2780			circled = 0;
2781			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2782		}
2783		/* In case the fragments were not in order we must reset */
2784	}			/* end for (j = fragStart */
2785	*p_tp1 = tp1;
2786	return (wake_him);	/* Return value only used for nr-sack */
2787}
2788
2789
2790static int
2791sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2792    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2793    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2794    int num_seg, int num_nr_seg, int *rto_ok)
2795{
2796	struct sctp_gap_ack_block *frag, block;
2797	struct sctp_tmit_chunk *tp1;
2798	int i;
2799	int num_frs = 0;
2800	int chunk_freed;
2801	int non_revocable;
2802	uint16_t frag_strt, frag_end, prev_frag_end;
2803
2804	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2805	prev_frag_end = 0;
2806	chunk_freed = 0;
2807
2808	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2809		if (i == num_seg) {
2810			prev_frag_end = 0;
2811			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2812		}
2813		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2814		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2815		*offset += sizeof(block);
2816		if (frag == NULL) {
2817			return (chunk_freed);
2818		}
2819		frag_strt = ntohs(frag->start);
2820		frag_end = ntohs(frag->end);
2821
2822		if (frag_strt > frag_end) {
2823			/* This gap report is malformed, skip it. */
2824			continue;
2825		}
2826		if (frag_strt <= prev_frag_end) {
2827			/* This gap report is not in order, so restart. */
2828			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2829		}
2830		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2831			*biggest_tsn_acked = last_tsn + frag_end;
2832		}
2833		if (i < num_seg) {
2834			non_revocable = 0;
2835		} else {
2836			non_revocable = 1;
2837		}
2838		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2839		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2840		    this_sack_lowest_newack, rto_ok)) {
2841			chunk_freed = 1;
2842		}
2843		prev_frag_end = frag_end;
2844	}
2845	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2846		if (num_frs)
2847			sctp_log_fr(*biggest_tsn_acked,
2848			    *biggest_newly_acked_tsn,
2849			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2850	}
2851	return (chunk_freed);
2852}
2853
2854static void
2855sctp_check_for_revoked(struct sctp_tcb *stcb,
2856    struct sctp_association *asoc, uint32_t cumack,
2857    uint32_t biggest_tsn_acked)
2858{
2859	struct sctp_tmit_chunk *tp1;
2860
2861	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2862		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2863			/*
2864			 * ok, this guy is either ACKED or MARKED. If it is
2865			 * ACKED it has been previously acked but not this
2866			 * time, i.e. revoked. If it is MARKED it was ACK'ed
2867			 * again.
2868			 */
2869			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2870				break;
2871			}
2872			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2873				/* it has been revoked */
2874				tp1->sent = SCTP_DATAGRAM_SENT;
2875				tp1->rec.data.chunk_was_revoked = 1;
2876				/*
2877				 * We must add this stuff back in to assure
2878				 * timers and such get started.
2879				 */
2880				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2881					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2882					    tp1->whoTo->flight_size,
2883					    tp1->book_size,
2884					    (uintptr_t) tp1->whoTo,
2885					    tp1->rec.data.TSN_seq);
2886				}
2887				sctp_flight_size_increase(tp1);
2888				sctp_total_flight_increase(stcb, tp1);
2889				/*
2890				 * We inflate the cwnd to compensate for our
2891				 * artificial inflation of the flight_size.
2892				 */
2893				tp1->whoTo->cwnd += tp1->book_size;
2894				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2895					sctp_log_sack(asoc->last_acked_seq,
2896					    cumack,
2897					    tp1->rec.data.TSN_seq,
2898					    0,
2899					    0,
2900					    SCTP_LOG_TSN_REVOKED);
2901				}
2902			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2903				/* it has been re-acked in this SACK */
2904				tp1->sent = SCTP_DATAGRAM_ACKED;
2905			}
2906		}
2907		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2908			break;
2909	}
2910}
2911
2912
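/*
 * Walk the sent queue and bump the strike count (tp1->sent) of chunks
 * that the gap reports show as missing; a chunk that reaches
 * SCTP_DATAGRAM_RESEND is marked for (fast) retransmission. The CMT
 * SFR/DAC and HTNA checks below restrict which chunks may be struck.
 */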
2913static void
2914sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2915    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2916{
2917	struct sctp_tmit_chunk *tp1;
2918	int strike_flag = 0;
2919	struct timeval now;
2920	int tot_retrans = 0;
2921	uint32_t sending_seq;
2922	struct sctp_nets *net;
2923	int num_dests_sacked = 0;
2924
2925	/*
2926	 * select the sending_seq; this is either the next thing ready to be
2927	 * sent but not yet transmitted, OR the next seq we will assign.
2928	 */
2929	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2930	if (tp1 == NULL) {
2931		sending_seq = asoc->sending_seq;
2932	} else {
2933		sending_seq = tp1->rec.data.TSN_seq;
2934	}
2935
2936	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2937	if ((asoc->sctp_cmt_on_off > 0) &&
2938	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2939		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2940			if (net->saw_newack)
2941				num_dests_sacked++;
2942		}
2943	}
2944	if (stcb->asoc.peer_supports_prsctp) {
2945		(void)SCTP_GETTIME_TIMEVAL(&now);
2946	}
2947	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2948		strike_flag = 0;
2949		if (tp1->no_fr_allowed) {
2950			/* this one had a timeout or something */
2951			continue;
2952		}
2953		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2954			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2955				sctp_log_fr(biggest_tsn_newly_acked,
2956				    tp1->rec.data.TSN_seq,
2957				    tp1->sent,
2958				    SCTP_FR_LOG_CHECK_STRIKE);
2959		}
2960		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2961		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2962			/* done */
2963			break;
2964		}
2965		if (stcb->asoc.peer_supports_prsctp) {
2966			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2967				/* Is it expired? */
2968				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2969					/* Yes so drop it */
2970					if (tp1->data != NULL) {
2971						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2972						    SCTP_SO_NOT_LOCKED);
2973					}
2974					continue;
2975				}
2976			}
2977		}
2978		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2979			/* we are beyond the tsn in the sack  */
2980			break;
2981		}
2982		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2983			/* either a RESEND, ACKED, or MARKED */
2984			/* skip */
2985			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
2986				/* Continue striking FWD-TSN chunks */
2987				tp1->rec.data.fwd_tsn_cnt++;
2988			}
2989			continue;
2990		}
2991		/*
2992		 * CMT : SFR algo (covers part of DAC and HTNA as well)
2993		 */
2994		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
2995			/*
2996			 * No new acks were received for data sent to this
2997			 * dest. Therefore, according to the SFR algo for
2998			 * CMT, no data sent to this dest can be marked for
2999			 * FR using this SACK.
3000			 */
3001			continue;
3002		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3003		    tp1->whoTo->this_sack_highest_newack)) {
3004			/*
3005			 * CMT: New acks were received for data sent to
3006			 * this dest. But no new acks were seen for data
3007			 * sent after tp1. Therefore, according to the SFR
3008			 * algo for CMT, tp1 cannot be marked for FR using
3009			 * this SACK. This step covers part of the DAC algo
3010			 * and the HTNA algo as well.
3011			 */
3012			continue;
3013		}
3014		/*
3015		 * Here we check to see if we have already done a FR, and if
3016		 * so whether the biggest TSN we saw in the sack is smaller
3017		 * than the recovery point. If it is, we don't strike
3018		 * the tsn... otherwise we CAN strike the TSN.
3019		 */
3020		/*
3021		 * @@@ JRI: Check for CMT if (accum_moved &&
3022		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3023		 * 0)) {
3024		 */
3025		if (accum_moved && asoc->fast_retran_loss_recovery) {
3026			/*
3027			 * Strike the TSN if in fast-recovery and cum-ack
3028			 * moved.
3029			 */
3030			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3031				sctp_log_fr(biggest_tsn_newly_acked,
3032				    tp1->rec.data.TSN_seq,
3033				    tp1->sent,
3034				    SCTP_FR_LOG_STRIKE_CHUNK);
3035			}
3036			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3037				tp1->sent++;
3038			}
3039			if ((asoc->sctp_cmt_on_off > 0) &&
3040			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3041				/*
3042				 * CMT DAC algorithm: If the SACK flag is set
3043				 * to 0, the lowest_newack test will not pass,
3044				 * because it would have been set to the cumack
3045				 * earlier. If the chunk is not already marked
3046				 * for rtx, the sack is not a mixed sack, and
3047				 * tp1 is not between two sacked TSNs, then
3048				 * mark it one more time. NOTE that we mark one
3049				 * additional time since the SACK DAC flag
3050				 * indicates that two packets have been
3051				 * received after this missing TSN.
3052				 */
3053				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3054				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3055					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3056						sctp_log_fr(16 + num_dests_sacked,
3057						    tp1->rec.data.TSN_seq,
3058						    tp1->sent,
3059						    SCTP_FR_LOG_STRIKE_CHUNK);
3060					}
3061					tp1->sent++;
3062				}
3063			}
3064		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3065		    (asoc->sctp_cmt_on_off == 0)) {
3066			/*
3067			 * For those that have done a FR we must take
3068			 * special consideration if we strike. I.e the
3069			 * biggest_newly_acked must be higher than the
3070			 * sending_seq at the time we did the FR.
3071			 */
3072			if (
3073#ifdef SCTP_FR_TO_ALTERNATE
3074			/*
3075			 * If FR's go to new networks, then we must only do
3076			 * this for singly homed asoc's. However if the FR's
3077			 * go to the same network (Armando's work) then it's
3078			 * ok to FR multiple times.
3079			 */
3080			    (asoc->numnets < 2)
3081#else
3082			    (1)
3083#endif
3084			    ) {
3085
3086				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3087				    tp1->rec.data.fast_retran_tsn)) {
3088					/*
3089					 * Strike the TSN, since this ack is
3090					 * beyond where things were when we
3091					 * did a FR.
3092					 */
3093					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3094						sctp_log_fr(biggest_tsn_newly_acked,
3095						    tp1->rec.data.TSN_seq,
3096						    tp1->sent,
3097						    SCTP_FR_LOG_STRIKE_CHUNK);
3098					}
3099					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3100						tp1->sent++;
3101					}
3102					strike_flag = 1;
3103					if ((asoc->sctp_cmt_on_off > 0) &&
3104					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3105						/*
3106						 * CMT DAC algorithm: If
3107						 * the SACK flag is set
3108						 * to 0, the lowest_newack
3109						 * test will not pass,
3110						 * because it would have
3111						 * been set to the cumack
3112						 * earlier. If the chunk
3113						 * is not already marked
3114						 * for rtx, the sack is
3115						 * not a mixed sack, and
3116						 * tp1 is not between two
3117						 * sacked TSNs, then mark
3118						 * it one more time. NOTE
3119						 * that we mark one more
3120						 * time since the SACK DAC
3121						 * flag indicates that two
3122						 * packets arrived after this missing TSN.
3123						 */
3124						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3125						    (num_dests_sacked == 1) &&
3126						    SCTP_TSN_GT(this_sack_lowest_newack,
3127						    tp1->rec.data.TSN_seq)) {
3128							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3129								sctp_log_fr(32 + num_dests_sacked,
3130								    tp1->rec.data.TSN_seq,
3131								    tp1->sent,
3132								    SCTP_FR_LOG_STRIKE_CHUNK);
3133							}
3134							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3135								tp1->sent++;
3136							}
3137						}
3138					}
3139				}
3140			}
3141			/*
3142			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3143			 * algo covers HTNA.
3144			 */
3145		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3146		    biggest_tsn_newly_acked)) {
3147			/*
3148			 * We don't strike these: this is the HTNA
3149			 * algorithm, i.e. we don't strike if our TSN is
3150			 * larger than the Highest TSN Newly Acked.
3151			 */
3152			;
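			/*
			 * Example (hypothetical TSNs): if the highest TSN
			 * newly acked by this SACK is 120, a pending TSN of
			 * 125 is left alone; the gap reports say nothing
			 * about anything above 120.
			 */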
3153		} else {
3154			/* Strike the TSN */
3155			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3156				sctp_log_fr(biggest_tsn_newly_acked,
3157				    tp1->rec.data.TSN_seq,
3158				    tp1->sent,
3159				    SCTP_FR_LOG_STRIKE_CHUNK);
3160			}
3161			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3162				tp1->sent++;
3163			}
3164			if ((asoc->sctp_cmt_on_off > 0) &&
3165			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3166				/*
3167				 * CMT DAC algorithm: if the SACK's DAC flag
3168				 * is 0, the lowest_newack test cannot pass,
3169				 * since it was set to the cumack earlier. If
3170				 * tp1 is not already marked for
3171				 * retransmission, this is not a mixed SACK,
3172				 * and tp1 is not between two SACKed TSNs,
3173				 * strike it one more time. NOTE that we mark
3174				 * one additional time, since the SACK DAC
3175				 * flag indicates that two packets have been
3176				 * received after this missing TSN.
3177				 */
3178				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3179				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3180					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3181						sctp_log_fr(48 + num_dests_sacked,
3182						    tp1->rec.data.TSN_seq,
3183						    tp1->sent,
3184						    SCTP_FR_LOG_STRIKE_CHUNK);
3185					}
3186					tp1->sent++;
3187				}
3188			}
3189		}
3190		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3191			struct sctp_nets *alt;
3192
3193			/* fix counts and things */
3194			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3195				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3196				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3197				    tp1->book_size,
3198				    (uintptr_t) tp1->whoTo,
3199				    tp1->rec.data.TSN_seq);
3200			}
3201			if (tp1->whoTo) {
3202				tp1->whoTo->net_ack++;
3203				sctp_flight_size_decrease(tp1);
3204				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3205					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3206					    tp1);
3207				}
3208			}
3209			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3210				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3211				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3212			}
3213			/* add back to the rwnd */
3214			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
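			/*
			 * Example with assumed numbers: for a 1200-byte
			 * chunk and sctp_peer_chunk_oh of 256, the peer's
			 * rwnd is credited 1456 bytes, since a chunk marked
			 * for retransmission no longer counts as occupying
			 * the peer's receive window.
			 */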
3215
3216			/* remove from the total flight */
3217			sctp_total_flight_decrease(stcb, tp1);
3218
3219			if ((stcb->asoc.peer_supports_prsctp) &&
3220			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3221				/*
3222				 * Has it been retransmitted tv_sec times?
3223				 * (We store the retransmission limit in tv_sec.)
3224				 */
3225				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3226					/* Yes, so drop it */
3227					if (tp1->data != NULL) {
3228						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3229						    SCTP_SO_NOT_LOCKED);
3230					}
3231					/* Make sure to flag we had a FR */
3232					tp1->whoTo->net_ack++;
3233					continue;
3234				}
3235			}
3236			/*
3237			 * SCTP_PRINTF("OK, we are now ready to FR this
3238			 * guy\n");
3239			 */
3240			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3241				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3242				    0, SCTP_FR_MARKED);
3243			}
3244			if (strike_flag) {
3245				/* This is a subsequent FR */
3246				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3247			}
3248			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3249			if (asoc->sctp_cmt_on_off > 0) {
3250				/*
3251				 * CMT: Using RTX_SSTHRESH policy for CMT.
3252				 * If CMT is being used, then pick dest with
3253				 * largest ssthresh for any retransmission.
3254				 */
3255				tp1->no_fr_allowed = 1;
3256				alt = tp1->whoTo;
3257				/* sa_ignore NO_NULL_CHK */
3258				if (asoc->sctp_cmt_pf > 0) {
3259					/*
3260					 * JRS 5/18/07 - If CMT PF is on,
3261					 * use the PF version of
3262					 * find_alt_net()
3263					 */
3264					alt = sctp_find_alternate_net(stcb, alt, 2);
3265				} else {
3266					/*
3267					 * JRS 5/18/07 - If only CMT is on,
3268					 * use the CMT version of
3269					 * find_alt_net()
3270					 */
3271					/* sa_ignore NO_NULL_CHK */
3272					alt = sctp_find_alternate_net(stcb, alt, 1);
3273				}
3274				if (alt == NULL) {
3275					alt = tp1->whoTo;
3276				}
3277				/*
3278				 * CUCv2: If a different dest is picked for
3279				 * the retransmission, then new
3280				 * (rtx-)pseudo_cumack needs to be tracked
3281				 * for orig dest. Let CUCv2 track new (rtx-)
3282				 * pseudo-cumack always.
3283				 */
3284				if (tp1->whoTo) {
3285					tp1->whoTo->find_pseudo_cumack = 1;
3286					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3287				}
3288			} else {/* CMT is OFF */
3289
3290#ifdef SCTP_FR_TO_ALTERNATE
3291				/* Can we find an alternate? */
3292				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3293#else
3294				/*
3295				 * default behavior is to NOT retransmit
3296				 * FR's to an alternate. Armando Caro's
3297				 * paper details why.
3298				 */
3299				alt = tp1->whoTo;
3300#endif
3301			}
3302
3303			tp1->rec.data.doing_fast_retransmit = 1;
3304			tot_retrans++;
3305			/* mark the sending seq for possible subsequent FR's */
3306			/*
3307			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3308			 * (uint32_t)tp1->rec.data.TSN_seq);
3309			 */
3310			if (TAILQ_EMPTY(&asoc->send_queue)) {
3311				/*
3312				 * If the send queue is empty then sending_seq
3313				 * is the next sequence number that will be
3314				 * assigned, so we subtract one from it to
3315				 * get the one we last sent.
3316				 */
3317				tp1->rec.data.fast_retran_tsn = sending_seq;
3318			} else {
3319				/*
3320				 * If there are chunks on the send queue
3321				 * (unsent data that has made it from the
3322				 * stream queues but not out the door), we
3323				 * take the first one (which will have the
3324				 * lowest TSN) and subtract one to get the
3325				 * one we last sent.
3326				 */
3327				struct sctp_tmit_chunk *ttt;
3328
3329				ttt = TAILQ_FIRST(&asoc->send_queue);
3330				tp1->rec.data.fast_retran_tsn =
3331				    ttt->rec.data.TSN_seq;
3332			}
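			/*
			 * Sketch of the two cases above (hypothetical TSNs):
			 * with an empty send queue, fast_retran_tsn is
			 * derived from sending_seq, the next TSN to be
			 * assigned; otherwise it is derived from the lowest
			 * queued TSN. A later SACK newly acking at or beyond
			 * this value permits another strike.
			 */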
3333
3334			if (tp1->do_rtt) {
3335				/*
3336				 * this guy had an RTO calculation pending
3337				 * on it, cancel it
3338				 */
3339				if ((tp1->whoTo != NULL) &&
3340				    (tp1->whoTo->rto_needed == 0)) {
3341					tp1->whoTo->rto_needed = 1;
3342				}
3343				tp1->do_rtt = 0;
3344			}
3345			if (alt != tp1->whoTo) {
3346				/* yes, there is an alternate. */
3347				sctp_free_remote_addr(tp1->whoTo);
3348				/* sa_ignore FREED_MEMORY */
3349				tp1->whoTo = alt;
3350				atomic_add_int(&alt->ref_count, 1);
3351			}
3352		}
3353	}
3354}
3355
3356struct sctp_tmit_chunk *
3357sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3358    struct sctp_association *asoc)
3359{
3360	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3361	struct timeval now;
3362	int now_filled = 0;
3363
3364	if (asoc->peer_supports_prsctp == 0) {
3365		return (NULL);
3366	}
3367	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3368		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3369		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3370		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3371			/* no chance to advance, out of here */
3372			break;
3373		}
3374		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3375			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3376			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3377				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3378				    asoc->advanced_peer_ack_point,
3379				    tp1->rec.data.TSN_seq, 0, 0);
3380			}
3381		}
3382		if (!PR_SCTP_ENABLED(tp1->flags)) {
3383			/*
3384			 * We can't fwd-tsn past any that are reliable, i.e.
3385			 * ones that will be retransmitted until the asoc fails.
3386			 */
3387			break;
3388		}
3389		if (!now_filled) {
3390			(void)SCTP_GETTIME_TIMEVAL(&now);
3391			now_filled = 1;
3392		}
3393		/*
3394		 * Now we have a chunk which is marked for another
3395		 * retransmission to a PR-stream but may have run out of its
3396		 * chances already OR has been marked to be skipped now. Can
3397		 * we skip it if it's a resend?
3398		 */
3399		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3400		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3401			/*
3402			 * Now is this one marked for resend and its time is
3403			 * now up?
3404			 */
3405			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3406				/* Yes so drop it */
3407				if (tp1->data) {
3408					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3409					    1, SCTP_SO_NOT_LOCKED);
3410				}
3411			} else {
3412				/*
3413				 * No, we are done when we hit one marked for
3414				 * resend whose time has not expired.
3415				 */
3416				break;
3417			}
3418		}
3419		/*
3420		 * OK, now if this chunk is marked to be dropped we can clean
3421		 * up the chunk, advance our peer ack point, and check the
3422		 * next chunk.
3423		 */
3424		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3425		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3426			/* the advanced PeerAckPoint moves forward */
3427			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3428				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3429				a_adv = tp1;
3430			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3431				/* No update but we do save the chk */
3432				a_adv = tp1;
3433			}
3434		} else {
3435			/*
3436			 * If it is still in RESEND we can advance no
3437			 * further
3438			 */
3439			break;
3440		}
3441	}
3442	return (a_adv);
3443}
3444
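/*
 * Example walk of sctp_try_advance_peer_ack_point() (hypothetical queue,
 * PR-SCTP enabled, advanced_peer_ack_point starting at 99):
 *
 *   TSN 100  SCTP_FORWARD_TSN_SKIP  -> ack point advances to 100
 *   TSN 101  SCTP_DATAGRAM_NR_ACKED -> ack point advances to 101
 *   TSN 102  SCTP_DATAGRAM_RESEND   -> stop, still in RESEND
 *
 * The returned chunk (TSN 101 here) is what the callers use to decide
 * whether a FORWARD-TSN is in order and where a timer must be kept up.
 */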
3445static int
3446sctp_fs_audit(struct sctp_association *asoc)
3447{
3448	struct sctp_tmit_chunk *chk;
3449	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3450	int entry_flight, entry_cnt, ret;
3451
3452	entry_flight = asoc->total_flight;
3453	entry_cnt = asoc->total_flight_count;
3454	ret = 0;
3455
3456	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3457		return (0);
3458
3459	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3460		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3461			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3462			    chk->rec.data.TSN_seq,
3463			    chk->send_size,
3464			    chk->snd_count);
3465			inflight++;
3466		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3467			resend++;
3468		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3469			inbetween++;
3470		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3471			above++;
3472		} else {
3473			acked++;
3474		}
3475	}
3476
3477	if ((inflight > 0) || (inbetween > 0)) {
3478#ifdef INVARIANTS
3479		panic("Flight size-express incorrect? \n");
3480#else
3481		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3482		    entry_flight, entry_cnt);
3483
3484		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3485		    inflight, inbetween, resend, above, acked);
3486		ret = 1;
3487#endif
3488	}
3489	return (ret);
3490}
3491
3492
3493static void
3494sctp_window_probe_recovery(struct sctp_tcb *stcb,
3495    struct sctp_association *asoc,
3496    struct sctp_tmit_chunk *tp1)
3497{
3498	tp1->window_probe = 0;
3499	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3500		/* TSN was skipped; we do NOT move it back. */
3501		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3502		    tp1->whoTo->flight_size,
3503		    tp1->book_size,
3504		    (uintptr_t) tp1->whoTo,
3505		    tp1->rec.data.TSN_seq);
3506		return;
3507	}
3508	/* First setup this by shrinking flight */
3509	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3510		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3511		    tp1);
3512	}
3513	sctp_flight_size_decrease(tp1);
3514	sctp_total_flight_decrease(stcb, tp1);
3515	/* Now mark for resend */
3516	tp1->sent = SCTP_DATAGRAM_RESEND;
3517	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3518
3519	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3520		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3521		    tp1->whoTo->flight_size,
3522		    tp1->book_size,
3523		    (uintptr_t) tp1->whoTo,
3524		    tp1->rec.data.TSN_seq);
3525	}
3526}
3527
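/*
 * Usage note (illustrative): both SACK handlers below call this when the
 * peer's rwnd has reopened and a window-probe chunk is still on the sent
 * queue. The probe is pulled out of the flight accounting and re-marked
 * SCTP_DATAGRAM_RESEND so the normal output path sends it again, instead
 * of waiting for a T3 timeout.
 */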
3528void
3529sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3530    uint32_t rwnd, int *abort_now, int ecne_seen)
3531{
3532	struct sctp_nets *net;
3533	struct sctp_association *asoc;
3534	struct sctp_tmit_chunk *tp1, *tp2;
3535	uint32_t old_rwnd;
3536	int win_probe_recovery = 0;
3537	int win_probe_recovered = 0;
3538	int j, done_once = 0;
3539	int rto_ok = 1;
3540
3541	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3542		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3543		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3544	}
3545	SCTP_TCB_LOCK_ASSERT(stcb);
3546#ifdef SCTP_ASOCLOG_OF_TSNS
3547	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3548	stcb->asoc.cumack_log_at++;
3549	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3550		stcb->asoc.cumack_log_at = 0;
3551	}
3552#endif
3553	asoc = &stcb->asoc;
3554	old_rwnd = asoc->peers_rwnd;
3555	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3556		/* old ack */
3557		return;
3558	} else if (asoc->last_acked_seq == cumack) {
3559		/* Window update sack */
3560		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3561		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3562		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3563			/* SWS sender side engages */
3564			asoc->peers_rwnd = 0;
3565		}
3566		if (asoc->peers_rwnd > old_rwnd) {
3567			goto again;
3568		}
3569		return;
3570	}
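	/*
	 * Worked example for the window-update branch above (assumed
	 * numbers): the peer advertises rwnd = 64000 while 3000 bytes in 2
	 * chunks are outstanding and sctp_peer_chunk_oh is 256. Then
	 * peers_rwnd = 64000 - (3000 + 2 * 256) = 60488. Only when this
	 * grew compared to old_rwnd do we jump to the timer pass below to
	 * recover any window probe.
	 */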
3571	/* First setup for CC stuff */
3572	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3573		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3574			/* Drag along the window_tsn for cwr's */
3575			net->cwr_window_tsn = cumack;
3576		}
3577		net->prev_cwnd = net->cwnd;
3578		net->net_ack = 0;
3579		net->net_ack2 = 0;
3580
3581		/*
3582		 * CMT: Reset CUC and Fast recovery algo variables before
3583		 * SACK processing
3584		 */
3585		net->new_pseudo_cumack = 0;
3586		net->will_exit_fast_recovery = 0;
3587		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3588			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3589		}
3590	}
3591	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3592		uint32_t send_s;
3593
3594		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3595			tp1 = TAILQ_LAST(&asoc->sent_queue,
3596			    sctpchunk_listhead);
3597			send_s = tp1->rec.data.TSN_seq + 1;
3598		} else {
3599			send_s = asoc->sending_seq;
3600		}
3601		if (SCTP_TSN_GE(cumack, send_s)) {
3602#ifndef INVARIANTS
3603			struct mbuf *op_err;
3604			char msg[SCTP_DIAG_INFO_LEN];
3605
3606#endif
3607#ifdef INVARIANTS
3608			panic("Impossible sack 1");
3609#else
3610
3611			*abort_now = 1;
3612			/* XXX */
3613			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3614			    cumack, send_s);
3615			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3616			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3617			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3618			return;
3619#endif
3620		}
3621	}
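	/*
	 * Example (hypothetical TSNs): if the highest TSN sent so far is
	 * 199, send_s is 200. A SACK carrying a cumack of 200 or more would
	 * acknowledge data that was never sent, so the peer is confused or
	 * malicious; without INVARIANTS the association is aborted above.
	 */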
3622	asoc->this_sack_highest_gap = cumack;
3623	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3624		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3625		    stcb->asoc.overall_error_count,
3626		    0,
3627		    SCTP_FROM_SCTP_INDATA,
3628		    __LINE__);
3629	}
3630	stcb->asoc.overall_error_count = 0;
3631	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3632		/* process the new consecutive TSN first */
3633		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3634			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3635				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3636					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3637				}
3638				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3639					/*
3640					 * If it is less than ACKED, it is
3641					 * now no longer in flight. Higher
3642					 * values may occur during marking
3643					 */
3644					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3645						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3646							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3647							    tp1->whoTo->flight_size,
3648							    tp1->book_size,
3649							    (uintptr_t) tp1->whoTo,
3650							    tp1->rec.data.TSN_seq);
3651						}
3652						sctp_flight_size_decrease(tp1);
3653						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3654							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3655							    tp1);
3656						}
3657						/* sa_ignore NO_NULL_CHK */
3658						sctp_total_flight_decrease(stcb, tp1);
3659					}
3660					tp1->whoTo->net_ack += tp1->send_size;
3661					if (tp1->snd_count < 2) {
3662						/*
3663						 * True non-retransmitted
3664						 * chunk
3665						 */
3666						tp1->whoTo->net_ack2 +=
3667						    tp1->send_size;
3668
3669						/* update RTO too? */
3670						if (tp1->do_rtt) {
3671							if (rto_ok) {
3672								tp1->whoTo->RTO =
3673								/*
3674								/* sa_ignore NO_NULL_CHK */
3679								    asoc, tp1->whoTo,
3680								    &tp1->sent_rcv_time,
3681								    sctp_align_safe_nocopy,
3682								    SCTP_RTT_FROM_DATA);
3683								rto_ok = 0;
3684							}
3685							if (tp1->whoTo->rto_needed == 0) {
3686								tp1->whoTo->rto_needed = 1;
3687							}
3688							tp1->do_rtt = 0;
3689						}
3690					}
3691					/*
3692					 * CMT: CUCv2 algorithm. From the
3693					 * cumack'd TSNs, for each TSN being
3694					 * acked for the first time, set the
3695					 * following variables for the
3696					 * corresp destination.
3697					 * new_pseudo_cumack will trigger a
3698					 * cwnd update.
3699					 * find_(rtx_)pseudo_cumack will
3700					 * trigger search for the next
3701					 * expected (rtx-)pseudo-cumack.
3702					 */
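					/*
					 * Illustration (assumed
					 * two-path scenario): a
					 * cumack covering a TSN
					 * last sent to destination
					 * A sets these flags on A
					 * only, so only A's cwnd
					 * is updated and only A
					 * hunts for its next
					 * (rtx-)pseudo-cumack.
					 */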
3703					tp1->whoTo->new_pseudo_cumack = 1;
3704					tp1->whoTo->find_pseudo_cumack = 1;
3705					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3706
3707					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3708						/* sa_ignore NO_NULL_CHK */
3709						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3710					}
3711				}
3712				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3713					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3714				}
3715				if (tp1->rec.data.chunk_was_revoked) {
3716					/* deflate the cwnd */
3717					tp1->whoTo->cwnd -= tp1->book_size;
3718					tp1->rec.data.chunk_was_revoked = 0;
3719				}
3720				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3721					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3722						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3723#ifdef INVARIANTS
3724					} else {
3725						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3726#endif
3727					}
3728				}
3729				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3730				if (tp1->data) {
3731					/* sa_ignore NO_NULL_CHK */
3732					sctp_free_bufspace(stcb, asoc, tp1, 1);
3733					sctp_m_freem(tp1->data);
3734					tp1->data = NULL;
3735				}
3736				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3737					sctp_log_sack(asoc->last_acked_seq,
3738					    cumack,
3739					    tp1->rec.data.TSN_seq,
3740					    0,
3741					    0,
3742					    SCTP_LOG_FREE_SENT);
3743				}
3744				asoc->sent_queue_cnt--;
3745				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3746			} else {
3747				break;
3748			}
3749		}
3750
3751	}
3752	/* sa_ignore NO_NULL_CHK */
3753	if (stcb->sctp_socket) {
3754#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3755		struct socket *so;
3756
3757#endif
3758		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3759		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3760			/* sa_ignore NO_NULL_CHK */
3761			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3762		}
3763#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3764		so = SCTP_INP_SO(stcb->sctp_ep);
3765		atomic_add_int(&stcb->asoc.refcnt, 1);
3766		SCTP_TCB_UNLOCK(stcb);
3767		SCTP_SOCKET_LOCK(so, 1);
3768		SCTP_TCB_LOCK(stcb);
3769		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3770		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3771			/* assoc was freed while we were unlocked */
3772			SCTP_SOCKET_UNLOCK(so, 1);
3773			return;
3774		}
3775#endif
3776		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3777#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3778		SCTP_SOCKET_UNLOCK(so, 1);
3779#endif
3780	} else {
3781		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3782			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3783		}
3784	}
3785
3786	/* JRS - Use the congestion control given in the CC module */
3787	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3788		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3789			if (net->net_ack2 > 0) {
3790				/*
3791				 * Karn's rule applies to clearing error
3792				 * count, this is optional.
3793				 */
3794				net->error_count = 0;
3795				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3796					/* addr came good */
3797					net->dest_state |= SCTP_ADDR_REACHABLE;
3798					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3799					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3800				}
3801				if (net == stcb->asoc.primary_destination) {
3802					if (stcb->asoc.alternate) {
3803						/*
3804						 * release the alternate,
3805						 * primary is good
3806						 */
3807						sctp_free_remote_addr(stcb->asoc.alternate);
3808						stcb->asoc.alternate = NULL;
3809					}
3810				}
3811				if (net->dest_state & SCTP_ADDR_PF) {
3812					net->dest_state &= ~SCTP_ADDR_PF;
3813					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3814					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3815					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3816					/* Done with this net */
3817					net->net_ack = 0;
3818				}
3819				/* restore any doubled timers */
3820				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3821				if (net->RTO < stcb->asoc.minrto) {
3822					net->RTO = stcb->asoc.minrto;
3823				}
3824				if (net->RTO > stcb->asoc.maxrto) {
3825					net->RTO = stcb->asoc.maxrto;
3826				}
3827			}
3828		}
3829		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3830	}
3831	asoc->last_acked_seq = cumack;
3832
3833	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3834		/* nothing left in-flight */
3835		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3836			net->flight_size = 0;
3837			net->partial_bytes_acked = 0;
3838		}
3839		asoc->total_flight = 0;
3840		asoc->total_flight_count = 0;
3841	}
3842	/* RWND update */
3843	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3844	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3845	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3846		/* SWS sender side engages */
3847		asoc->peers_rwnd = 0;
3848	}
3849	if (asoc->peers_rwnd > old_rwnd) {
3850		win_probe_recovery = 1;
3851	}
3852	/* Now assure a timer where data is queued at */
3853again:
3854	j = 0;
3855	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3856		int to_ticks;
3857
3858		if (win_probe_recovery && (net->window_probe)) {
3859			win_probe_recovered = 1;
3860			/*
3861			 * Find the first chunk that was used with a window
3862			 * probe and clear its sent state.
3863			 */
3864			/* sa_ignore FREED_MEMORY */
3865			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3866				if (tp1->window_probe) {
3867					/* move back to data send queue */
3868					sctp_window_probe_recovery(stcb, asoc, tp1);
3869					break;
3870				}
3871			}
3872		}
3873		if (net->RTO == 0) {
3874			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3875		} else {
3876			to_ticks = MSEC_TO_TICKS(net->RTO);
3877		}
3878		if (net->flight_size) {
3879			j++;
3880			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3881			    sctp_timeout_handler, &net->rxt_timer);
3882			if (net->window_probe) {
3883				net->window_probe = 0;
3884			}
3885		} else {
3886			if (net->window_probe) {
3887				/*
3888				 * In window probes we must assure a timer
3889				 * is still running there
3890				 */
3891				net->window_probe = 0;
3892				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3893					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3894					    sctp_timeout_handler, &net->rxt_timer);
3895				}
3896			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3897				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3898				    stcb, net,
3899				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
3900			}
3901		}
3902	}
3903	if ((j == 0) &&
3904	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3905	    (asoc->sent_queue_retran_cnt == 0) &&
3906	    (win_probe_recovered == 0) &&
3907	    (done_once == 0)) {
3908		/*
3909		 * huh, this should not happen unless all packets are
3910		 * PR-SCTP and marked to skip of course.
3911		 */
3912		if (sctp_fs_audit(asoc)) {
3913			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3914				net->flight_size = 0;
3915			}
3916			asoc->total_flight = 0;
3917			asoc->total_flight_count = 0;
3918			asoc->sent_queue_retran_cnt = 0;
3919			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3920				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3921					sctp_flight_size_increase(tp1);
3922					sctp_total_flight_increase(stcb, tp1);
3923				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3924					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3925				}
3926			}
3927		}
3928		done_once = 1;
3929		goto again;
3930	}
3931	/**********************************/
3932	/* Now what about shutdown issues */
3933	/**********************************/
3934	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3935		/* nothing left on sendqueue.. consider done */
3936		/* clean up */
3937		if ((asoc->stream_queue_cnt == 1) &&
3938		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3939		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3940		    (asoc->locked_on_sending)
3941		    ) {
3942			struct sctp_stream_queue_pending *sp;
3943
3944			/*
3945			 * I may be in a state where we got all across... but
3946			 * cannot write more due to a shutdown... we abort
3947			 * since the user did not indicate EOR in this case.
3948			 * The sp will be cleaned during free of the asoc.
3949			 */
3950			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3951			    sctp_streamhead);
3952			if ((sp) && (sp->length == 0)) {
3953				/* Let cleanup code purge it */
3954				if (sp->msg_is_complete) {
3955					asoc->stream_queue_cnt--;
3956				} else {
3957					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3958					asoc->locked_on_sending = NULL;
3959					asoc->stream_queue_cnt--;
3960				}
3961			}
3962		}
3963		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3964		    (asoc->stream_queue_cnt == 0)) {
3965			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3966				/* Need to abort here */
3967				struct mbuf *op_err;
3968
3969		abort_out_now:
3970				*abort_now = 1;
3971				/* XXX */
3972				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3973				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
3974				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3975			} else {
3976				struct sctp_nets *netp;
3977
3978				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3979				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3980					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3981				}
3982				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
3983				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3984				sctp_stop_timers_for_shutdown(stcb);
3985				if (asoc->alternate) {
3986					netp = asoc->alternate;
3987				} else {
3988					netp = asoc->primary_destination;
3989				}
3990				sctp_send_shutdown(stcb, netp);
3991				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
3992				    stcb->sctp_ep, stcb, netp);
3993				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
3994				    stcb->sctp_ep, stcb, netp);
3995			}
3996		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
3997		    (asoc->stream_queue_cnt == 0)) {
3998			struct sctp_nets *netp;
3999
4000			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4001				goto abort_out_now;
4002			}
4003			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4004			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4005			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4006			sctp_stop_timers_for_shutdown(stcb);
4007			if (asoc->alternate) {
4008				netp = asoc->alternate;
4009			} else {
4010				netp = asoc->primary_destination;
4011			}
4012			sctp_send_shutdown_ack(stcb, netp);
4013			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4014			    stcb->sctp_ep, stcb, netp);
4015		}
4016	}
4017	/*********************************************/
4018	/* Here we perform PR-SCTP procedures        */
4019	/* (section 4.2)                             */
4020	/*********************************************/
4021	/* C1. update advancedPeerAckPoint */
4022	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4023		asoc->advanced_peer_ack_point = cumack;
4024	}
4025	/* PR-Sctp issues need to be addressed too */
4026	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4027		struct sctp_tmit_chunk *lchk;
4028		uint32_t old_adv_peer_ack_point;
4029
4030		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4031		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4032		/* C3. See if we need to send a Fwd-TSN */
4033		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4034			/*
4035			 * ISSUE with ECN, see FWD-TSN processing.
4036			 */
4037			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4038				send_forward_tsn(stcb, asoc);
4039			} else if (lchk) {
4040				/* try to FR fwd-tsn's that get lost too */
4041				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4042					send_forward_tsn(stcb, asoc);
4043				}
4044			}
4045		}
4046		if (lchk) {
4047			/* Assure a timer is up */
4048			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4049			    stcb->sctp_ep, stcb, lchk->whoTo);
4050		}
4051	}
4052	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4053		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4054		    rwnd,
4055		    stcb->asoc.peers_rwnd,
4056		    stcb->asoc.total_flight,
4057		    stcb->asoc.total_output_queue_size);
4058	}
4059}
4060
4061void
4062sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4063    struct sctp_tcb *stcb,
4064    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4065    int *abort_now, uint8_t flags,
4066    uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4067{
4068	struct sctp_association *asoc;
4069	struct sctp_tmit_chunk *tp1, *tp2;
4070	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4071	uint16_t wake_him = 0;
4072	uint32_t send_s = 0;
4073	long j;
4074	int accum_moved = 0;
4075	int will_exit_fast_recovery = 0;
4076	uint32_t a_rwnd, old_rwnd;
4077	int win_probe_recovery = 0;
4078	int win_probe_recovered = 0;
4079	struct sctp_nets *net = NULL;
4080	int done_once;
4081	int rto_ok = 1;
4082	uint8_t reneged_all = 0;
4083	uint8_t cmt_dac_flag;
4084
4085	/*
4086	 * we take any chance we can to service our queues since we cannot
4087	 * get awoken when the socket is read from :<
4088	 */
4089	/*
4090	 * Now perform the actual SACK handling: 1) Verify that it is not an
4091	 * old sack; if so, discard it. 2) If there is nothing left in the
4092	 * send queue (cum-ack is equal to last acked) then you have a
4093	 * duplicate too; update any rwnd change, verify no timers are
4094	 * running, then return. 3) Process any new consecutive data, i.e.
4095	 * the cum-ack moved; process these first and note that it moved.
4096	 * 4) Process any sack blocks. 5) Drop any acked chunks from the
4097	 * queue. 6) Check for any revoked blocks and mark them. 7) Update
4098	 * the cwnd. 8) If nothing is left, sync up flightsizes and things,
4099	 * stop all timers and also check for the shutdown_pending state; if
4100	 * so, go ahead and send off the shutdown. If in shutdown recv, send
4101	 * off the shutdown-ack and start that timer, then return. 9) Strike
4102	 * any non-acked things and do the FR procedure if needed, being
4103	 * sure to set the FR flag. 10) Do pr-sctp procedures. 11) Apply any
4104	 * FR penalties. 12) Assure we will SACK if in shutdown_recv state.
4105	 */
4106	SCTP_TCB_LOCK_ASSERT(stcb);
4107	/* CMT DAC algo */
4108	this_sack_lowest_newack = 0;
4109	SCTP_STAT_INCR(sctps_slowpath_sack);
4110	last_tsn = cum_ack;
4111	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4112#ifdef SCTP_ASOCLOG_OF_TSNS
4113	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4114	stcb->asoc.cumack_log_at++;
4115	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4116		stcb->asoc.cumack_log_at = 0;
4117	}
4118#endif
4119	a_rwnd = rwnd;
4120
4121	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4122		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4123		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4124	}
4125	old_rwnd = stcb->asoc.peers_rwnd;
4126	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4127		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4128		    stcb->asoc.overall_error_count,
4129		    0,
4130		    SCTP_FROM_SCTP_INDATA,
4131		    __LINE__);
4132	}
4133	stcb->asoc.overall_error_count = 0;
4134	asoc = &stcb->asoc;
4135	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4136		sctp_log_sack(asoc->last_acked_seq,
4137		    cum_ack,
4138		    0,
4139		    num_seg,
4140		    num_dup,
4141		    SCTP_LOG_NEW_SACK);
4142	}
4143	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4144		uint16_t i;
4145		uint32_t *dupdata, dblock;
4146
4147		for (i = 0; i < num_dup; i++) {
4148			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4149			    sizeof(uint32_t), (uint8_t *) & dblock);
4150			if (dupdata == NULL) {
4151				break;
4152			}
4153			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4154		}
4155	}
4156	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4157		/* reality check */
4158		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4159			tp1 = TAILQ_LAST(&asoc->sent_queue,
4160			    sctpchunk_listhead);
4161			send_s = tp1->rec.data.TSN_seq + 1;
4162		} else {
4163			tp1 = NULL;
4164			send_s = asoc->sending_seq;
4165		}
4166		if (SCTP_TSN_GE(cum_ack, send_s)) {
4167			struct mbuf *op_err;
4168			char msg[SCTP_DIAG_INFO_LEN];
4169
4170			/*
4171			 * no way, we have not even sent this TSN out yet.
4172			 * Peer is hopelessly messed up with us.
4173			 */
4174			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4175			    cum_ack, send_s);
4176			if (tp1) {
4177				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4178				    tp1->rec.data.TSN_seq, (void *)tp1);
4179			}
4180	hopeless_peer:
4181			*abort_now = 1;
4182			/* XXX */
4183			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4184			    cum_ack, send_s);
4185			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4186			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4187			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4188			return;
4189		}
4190	}
4191	/**********************/
4192	/* 1) check the range */
4193	/**********************/
4194	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4195		/* acking something behind */
4196		return;
4197	}
4198	/* update the Rwnd of the peer */
4199	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4200	    TAILQ_EMPTY(&asoc->send_queue) &&
4201	    (asoc->stream_queue_cnt == 0)) {
4202		/* nothing left on send/sent and strmq */
4203		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4204			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4205			    asoc->peers_rwnd, 0, 0, a_rwnd);
4206		}
4207		asoc->peers_rwnd = a_rwnd;
4208		if (asoc->sent_queue_retran_cnt) {
4209			asoc->sent_queue_retran_cnt = 0;
4210		}
4211		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4212			/* SWS sender side engages */
4213			asoc->peers_rwnd = 0;
4214		}
4215		/* stop any timers */
4216		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4217			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4218			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4219			net->partial_bytes_acked = 0;
4220			net->flight_size = 0;
4221		}
4222		asoc->total_flight = 0;
4223		asoc->total_flight_count = 0;
4224		return;
4225	}
4226	/*
4227	 * We init net_ack and net_ack2 to 0. These are used to track 2
4228	 * things. The total byte count acked is tracked in net_ack AND
4229	 * net_ack2 is used to track the total bytes acked that are
4230	 * unambiguous and were never retransmitted. We track these on a per
4231	 * destination address basis.
4232	 */
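	/*
	 * Example with assumed sizes: a 1200-byte chunk newly acked with
	 * snd_count == 1 adds 1200 to both net_ack and net_ack2 of its
	 * destination; a retransmitted chunk (snd_count >= 2) is ambiguous
	 * for RTT purposes and counts toward net_ack only.
	 */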
4233	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4234		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4235			/* Drag along the window_tsn for cwr's */
4236			net->cwr_window_tsn = cum_ack;
4237		}
4238		net->prev_cwnd = net->cwnd;
4239		net->net_ack = 0;
4240		net->net_ack2 = 0;
4241
4242		/*
4243		 * CMT: Reset CUC and Fast recovery algo variables before
4244		 * SACK processing
4245		 */
4246		net->new_pseudo_cumack = 0;
4247		net->will_exit_fast_recovery = 0;
4248		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4249			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4250		}
4251	}
4252	/* process the new consecutive TSN first */
4253	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4254		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4255			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4256				accum_moved = 1;
4257				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4258					/*
4259					 * If it is less than ACKED, it is
4260					 * now no longer in flight. Higher
4261					 * values may occur during marking
4262					 */
4263					if ((tp1->whoTo->dest_state &
4264					    SCTP_ADDR_UNCONFIRMED) &&
4265					    (tp1->snd_count < 2)) {
4266						/*
4267						 * If there was no retran
4268						 * and the address is
4269						 * un-confirmed and we sent
4270						 * there and it is now
4271						 * sacked... it's confirmed,
4272						 * mark it so.
4273						 */
4274						tp1->whoTo->dest_state &=
4275						    ~SCTP_ADDR_UNCONFIRMED;
4276					}
4277					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4278						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4279							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4280							    tp1->whoTo->flight_size,
4281							    tp1->book_size,
4282							    (uintptr_t) tp1->whoTo,
4283							    tp1->rec.data.TSN_seq);
4284						}
4285						sctp_flight_size_decrease(tp1);
4286						sctp_total_flight_decrease(stcb, tp1);
4287						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4288							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4289							    tp1);
4290						}
4291					}
4292					tp1->whoTo->net_ack += tp1->send_size;
4293
4294					/* CMT SFR and DAC algos */
4295					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4296					tp1->whoTo->saw_newack = 1;
4297
4298					if (tp1->snd_count < 2) {
4299						/*
4300						 * True non-retransmitted
4301						 * chunk
4302						 */
4303						tp1->whoTo->net_ack2 +=
4304						    tp1->send_size;
4305
4306						/* update RTO too? */
4307						if (tp1->do_rtt) {
4308							if (rto_ok) {
4309								tp1->whoTo->RTO =
4310								    sctp_calculate_rto(stcb,
4311								    asoc, tp1->whoTo,
4312								    &tp1->sent_rcv_time,
4313								    sctp_align_safe_nocopy,
4314								    SCTP_RTT_FROM_DATA);
4315								rto_ok = 0;
4316							}
4317							if (tp1->whoTo->rto_needed == 0) {
4318								tp1->whoTo->rto_needed = 1;
4319							}
4320							tp1->do_rtt = 0;
4321						}
4322					}
4323					/*
4324					 * CMT: CUCv2 algorithm. From the
4325					 * cumack'd TSNs, for each TSN being
4326					 * acked for the first time, set the
4327					 * following variables for the
4328					 * corresp destination.
4329					 * new_pseudo_cumack will trigger a
4330					 * cwnd update.
4331					 * find_(rtx_)pseudo_cumack will
4332					 * trigger search for the next
4333					 * expected (rtx-)pseudo-cumack.
4334					 */
4335					tp1->whoTo->new_pseudo_cumack = 1;
4336					tp1->whoTo->find_pseudo_cumack = 1;
4337					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4338
4339
4340					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4341						sctp_log_sack(asoc->last_acked_seq,
4342						    cum_ack,
4343						    tp1->rec.data.TSN_seq,
4344						    0,
4345						    0,
4346						    SCTP_LOG_TSN_ACKED);
4347					}
4348					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4349						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4350					}
4351				}
4352				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4353					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4354#ifdef SCTP_AUDITING_ENABLED
4355					sctp_audit_log(0xB3,
4356					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4357#endif
4358				}
4359				if (tp1->rec.data.chunk_was_revoked) {
4360					/* deflate the cwnd */
4361					tp1->whoTo->cwnd -= tp1->book_size;
4362					tp1->rec.data.chunk_was_revoked = 0;
4363				}
4364				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4365					tp1->sent = SCTP_DATAGRAM_ACKED;
4366				}
4367			}
4368		} else {
4369			break;
4370		}
4371	}
4372	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4373	/* always set this up to cum-ack */
4374	asoc->this_sack_highest_gap = last_tsn;
4375
4376	if ((num_seg > 0) || (num_nr_seg > 0)) {
4377
4378		/*
4379		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4380		 * to be greater than the cumack. Also reset saw_newack to 0
4381		 * for all dests.
4382		 */
4383		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4384			net->saw_newack = 0;
4385			net->this_sack_highest_newack = last_tsn;
4386		}
4387
4388		/*
4389		 * thisSackHighestGap will increase while handling NEW
4390		 * segments this_sack_highest_newack will increase while
4391		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4392		 * used for CMT DAC algo. saw_newack will also change.
4393		 */
4394		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4395		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4396		    num_seg, num_nr_seg, &rto_ok)) {
4397			wake_him++;
4398		}
4399		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4400			/*
4401			 * validate the biggest_tsn_acked in the gap acks if
4402			 * strict adherence is wanted.
4403			 */
4404			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4405				/*
4406				 * peer is either confused or we are under
4407				 * attack. We must abort.
4408				 */
4409				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4410				    biggest_tsn_acked, send_s);
4411				goto hopeless_peer;
4412			}
4413		}
4414	}
4415	/*******************************************/
4416	/* cancel ALL T3-send timer if accum moved */
4417	/*******************************************/
4418	if (asoc->sctp_cmt_on_off > 0) {
4419		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4420			if (net->new_pseudo_cumack)
4421				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4422				    stcb, net,
4423				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4424
4425		}
4426	} else {
4427		if (accum_moved) {
4428			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4429				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4430				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4431			}
4432		}
4433	}
4434	/********************************************/
4435	/* drop the acked chunks from the sentqueue */
4436	/********************************************/
4437	asoc->last_acked_seq = cum_ack;
4438
4439	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4440		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4441			break;
4442		}
4443		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4444			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4445				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4446#ifdef INVARIANTS
4447			} else {
4448				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4449#endif
4450			}
4451		}
4452		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4453		if (PR_SCTP_ENABLED(tp1->flags)) {
4454			if (asoc->pr_sctp_cnt != 0)
4455				asoc->pr_sctp_cnt--;
4456		}
4457		asoc->sent_queue_cnt--;
4458		if (tp1->data) {
4459			/* sa_ignore NO_NULL_CHK */
4460			sctp_free_bufspace(stcb, asoc, tp1, 1);
4461			sctp_m_freem(tp1->data);
4462			tp1->data = NULL;
4463			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4464				asoc->sent_queue_cnt_removeable--;
4465			}
4466		}
4467		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4468			sctp_log_sack(asoc->last_acked_seq,
4469			    cum_ack,
4470			    tp1->rec.data.TSN_seq,
4471			    0,
4472			    0,
4473			    SCTP_LOG_FREE_SENT);
4474		}
4475		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4476		wake_him++;
4477	}
4478	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4479#ifdef INVARIANTS
4480		panic("Warning flight size is positive and should be 0");
4481#else
4482		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4483		    asoc->total_flight);
4484#endif
4485		asoc->total_flight = 0;
4486	}
4487	/* sa_ignore NO_NULL_CHK */
4488	if ((wake_him) && (stcb->sctp_socket)) {
4489#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4490		struct socket *so;
4491
4492#endif
4493		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4494		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4495			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4496		}
4497#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4498		so = SCTP_INP_SO(stcb->sctp_ep);
4499		atomic_add_int(&stcb->asoc.refcnt, 1);
4500		SCTP_TCB_UNLOCK(stcb);
4501		SCTP_SOCKET_LOCK(so, 1);
4502		SCTP_TCB_LOCK(stcb);
4503		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4504		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4505			/* assoc was freed while we were unlocked */
4506			SCTP_SOCKET_UNLOCK(so, 1);
4507			return;
4508		}
4509#endif
4510		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4511#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4512		SCTP_SOCKET_UNLOCK(so, 1);
4513#endif
4514	} else {
4515		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4516			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4517		}
4518	}
4519
4520	if (asoc->fast_retran_loss_recovery && accum_moved) {
4521		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4522			/* Setup so we will exit RFC2582 fast recovery */
4523			will_exit_fast_recovery = 1;
4524		}
4525	}
4526	/*
4527	 * Check for revoked fragments:
4528	 *
4529	 * If the previous SACK had no frags, nothing can have been revoked.
4530	 * If the previous SACK had frags: when we now have frags too (num_seg
4531	 *   > 0), call sctp_check_for_revoked() to tell if the peer revoked
4532	 *   some of them; else the peer revoked ALL ACKED fragments, since we
4533	 *   had some before and now we have NONE.
4534	 */
4535
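	/*
	 * Example (hypothetical): the previous SACK gap-acked TSN 105, so
	 * its chunk is SCTP_DATAGRAM_ACKED. If the current SACK carries no
	 * gap reports (num_seg == 0), TSN 105 was revoked: below it is
	 * flipped back to SCTP_DATAGRAM_SENT and returned to the flight
	 * totals (with a matching artificial cwnd credit).
	 */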
4536	if (num_seg) {
4537		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4538		asoc->saw_sack_with_frags = 1;
4539	} else if (asoc->saw_sack_with_frags) {
4540		int cnt_revoked = 0;
4541
4542		/* Peer revoked all dg's marked or acked */
4543		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4544			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4545				tp1->sent = SCTP_DATAGRAM_SENT;
4546				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4547					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4548					    tp1->whoTo->flight_size,
4549					    tp1->book_size,
4550					    (uintptr_t) tp1->whoTo,
4551					    tp1->rec.data.TSN_seq);
4552				}
4553				sctp_flight_size_increase(tp1);
4554				sctp_total_flight_increase(stcb, tp1);
4555				tp1->rec.data.chunk_was_revoked = 1;
4556				/*
4557				 * To ensure that this increase in
4558				 * flightsize, which is artificial, does not
4559				 * throttle the sender, we also increase the
4560				 * cwnd artificially.
4561				 */
4562				tp1->whoTo->cwnd += tp1->book_size;
4563				cnt_revoked++;
4564			}
4565		}
4566		if (cnt_revoked) {
4567			reneged_all = 1;
4568		}
4569		asoc->saw_sack_with_frags = 0;
4570	}
4571	if (num_nr_seg > 0)
4572		asoc->saw_sack_with_nr_frags = 1;
4573	else
4574		asoc->saw_sack_with_nr_frags = 0;
4575
4576	/* JRS - Use the congestion control given in the CC module */
4577	if (ecne_seen == 0) {
4578		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4579			if (net->net_ack2 > 0) {
4580				/*
4581				 * Karn's rule applies to clearing error
4582				 * count, this is optional.
4583				 */
4584				net->error_count = 0;
4585				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4586					/* addr came good */
4587					net->dest_state |= SCTP_ADDR_REACHABLE;
4588					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4589					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4590				}
4591				if (net == stcb->asoc.primary_destination) {
4592					if (stcb->asoc.alternate) {
4593						/*
4594						 * release the alternate,
4595						 * primary is good
4596						 */
4597						sctp_free_remote_addr(stcb->asoc.alternate);
4598						stcb->asoc.alternate = NULL;
4599					}
4600				}
4601				if (net->dest_state & SCTP_ADDR_PF) {
4602					net->dest_state &= ~SCTP_ADDR_PF;
4603					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4604					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4605					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4606					/* Done with this net */
4607					net->net_ack = 0;
4608				}
4609				/* restore any doubled timers */
4610				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4611				if (net->RTO < stcb->asoc.minrto) {
4612					net->RTO = stcb->asoc.minrto;
4613				}
4614				if (net->RTO > stcb->asoc.maxrto) {
4615					net->RTO = stcb->asoc.maxrto;
4616				}
4617			}
4618		}
4619		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4620	}
4621	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4622		/* nothing left in-flight */
4623		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4624			/* stop all timers */
4625			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4626			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4627			net->flight_size = 0;
4628			net->partial_bytes_acked = 0;
4629		}
4630		asoc->total_flight = 0;
4631		asoc->total_flight_count = 0;
4632	}
4633	/**********************************/
4634	/* Now what about shutdown issues */
4635	/**********************************/
4636	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4637		/* nothing left on sendqueue.. consider done */
4638		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4639			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4640			    asoc->peers_rwnd, 0, 0, a_rwnd);
4641		}
4642		asoc->peers_rwnd = a_rwnd;
4643		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4644			/* SWS sender side engages */
4645			asoc->peers_rwnd = 0;
4646		}
4647		/* clean up */
4648		if ((asoc->stream_queue_cnt == 1) &&
4649		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4650		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4651		    (asoc->locked_on_sending)
4652		    ) {
4653			struct sctp_stream_queue_pending *sp;
4654
4655			/*
4656			 * I may be in a state where we got all across... but
4657			 * cannot write more due to a shutdown... we abort
4658			 * since the user did not indicate EOR in this case.
4659			 */
4660			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4661			    sctp_streamhead);
4662			if ((sp) && (sp->length == 0)) {
4663				asoc->locked_on_sending = NULL;
4664				if (sp->msg_is_complete) {
4665					asoc->stream_queue_cnt--;
4666				} else {
4667					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4668					asoc->stream_queue_cnt--;
4669				}
4670			}
4671		}
4672		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4673		    (asoc->stream_queue_cnt == 0)) {
4674			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4675				/* Need to abort here */
4676				struct mbuf *op_err;
4677
4678		abort_out_now:
4679				*abort_now = 1;
4680				/* XXX */
4681				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4682				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4683				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4684				return;
4685			} else {
4686				struct sctp_nets *netp;
4687
4688				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4689				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4690					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4691				}
4692				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4693				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4694				sctp_stop_timers_for_shutdown(stcb);
4695				if (asoc->alternate) {
4696					netp = asoc->alternate;
4697				} else {
4698					netp = asoc->primary_destination;
4699				}
4700				sctp_send_shutdown(stcb, netp);
4701				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4702				    stcb->sctp_ep, stcb, netp);
4703				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4704				    stcb->sctp_ep, stcb, netp);
4705			}
4706			return;
4707		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4708		    (asoc->stream_queue_cnt == 0)) {
4709			struct sctp_nets *netp;
4710
4711			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4712				goto abort_out_now;
4713			}
4714			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4715			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4716			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4717			sctp_stop_timers_for_shutdown(stcb);
4718			if (asoc->alternate) {
4719				netp = asoc->alternate;
4720			} else {
4721				netp = asoc->primary_destination;
4722			}
4723			sctp_send_shutdown_ack(stcb, netp);
4724			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4725			    stcb->sctp_ep, stcb, netp);
4726			return;
4727		}
4728	}
4729	/*
4730	 * Now here we are going to recycle net_ack for a different use...
4731	 * HEADS UP.
4732	 */
4733	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4734		net->net_ack = 0;
4735	}
4736
4737	/*
4738	 * CMT DAC algorithm: if the SACK DAC flag was 0, then no extra marking
4739	 * is to be done. Setting this_sack_lowest_newack to the cum_ack will
4740	 * automatically ensure that.
4741	 */
4742	if ((asoc->sctp_cmt_on_off > 0) &&
4743	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4744	    (cmt_dac_flag == 0)) {
4745		this_sack_lowest_newack = cum_ack;
4746	}
4747	if ((num_seg > 0) || (num_nr_seg > 0)) {
4748		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4749		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4750	}
4751	/* JRS - Use the congestion control given in the CC module */
4752	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4753
4754	/* Now are we exiting loss recovery ? */
4755	if (will_exit_fast_recovery) {
4756		/* Ok, we must exit fast recovery */
4757		asoc->fast_retran_loss_recovery = 0;
4758	}
4759	if ((asoc->sat_t3_loss_recovery) &&
4760	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4761		/* end satellite t3 loss recovery */
4762		asoc->sat_t3_loss_recovery = 0;
4763	}
4764	/*
4765	 * CMT Fast recovery
4766	 */
4767	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4768		if (net->will_exit_fast_recovery) {
4769			/* Ok, we must exit fast recovery */
4770			net->fast_retran_loss_recovery = 0;
4771		}
4772	}
4773
4774	/* Adjust and set the new rwnd value */
4775	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4776		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4777		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4778	}
4779	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4780	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
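	/*
	 * The peer's usable window is its advertised a_rwnd minus what we
	 * already have in flight, with each in-flight chunk also charged
	 * sctp_peer_chunk_oh bytes of presumed wire overhead. The clamp
	 * below is sender-side silly window syndrome (SWS) avoidance: once
	 * the usable window drops under the configured threshold, treat it
	 * as zero rather than dribbling out tiny chunks.
	 */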
4781	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4782		/* SWS sender side engages */
4783		asoc->peers_rwnd = 0;
4784	}
4785	if (asoc->peers_rwnd > old_rwnd) {
4786		win_probe_recovery = 1;
4787	}
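	/*
	 * If this SACK actually grew the peer's window, any chunk that
	 * was sent as a window probe can be recovered; the loop below
	 * puts it back as ordinary outstanding data.
	 */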
4788	/*
4789	 * Now we must set things up so that a timer is running for
4790	 * anyone with outstanding data.
4791	 */
4792	done_once = 0;
4793again:
4794	j = 0;
4795	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796		if (win_probe_recovery && (net->window_probe)) {
4797			win_probe_recovered = 1;
4798			/*-
4799			 * Find the first chunk that was used for a
4800			 * window probe and clear the event; put it
4801			 * back into the send queue as if it had
4802			 * not been sent.
4803			 */
4804			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4805				if (tp1->window_probe) {
4806					sctp_window_probe_recovery(stcb, asoc, tp1);
4807					break;
4808				}
4809			}
4810		}
4811		if (net->flight_size) {
4812			j++;
4813			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4814				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4815				    stcb->sctp_ep, stcb, net);
4816			}
4817			if (net->window_probe) {
4818				net->window_probe = 0;
4819			}
4820		} else {
4821			if (net->window_probe) {
4822				/*
4823				 * For window probes we must ensure a timer
4824				 * is still running on that net.
4825				 */
4826				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4827					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4828					    stcb->sctp_ep, stcb, net);
4829
4830				}
4831			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4832				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4833				    stcb, net,
4834				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4835			}
4836		}
4837	}
4838	if ((j == 0) &&
4839	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4840	    (asoc->sent_queue_retran_cnt == 0) &&
4841	    (win_probe_recovered == 0) &&
4842	    (done_once == 0)) {
4843		/*
4844		 * This should not happen unless all packets are
4845		 * PR-SCTP and marked to be skipped, of course.
4846		 */
4847		if (sctp_fs_audit(asoc)) {
4848			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4849				net->flight_size = 0;
4850			}
4851			asoc->total_flight = 0;
4852			asoc->total_flight_count = 0;
4853			asoc->sent_queue_retran_cnt = 0;
4854			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4855				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4856					sctp_flight_size_increase(tp1);
4857					sctp_total_flight_increase(stcb, tp1);
4858				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4859					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4860				}
4861			}
4862		}
4863		done_once = 1;
4864		goto again;
4865	}
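	/*
	 * The audit above is a consistency rebuild: if our book-keeping
	 * says nothing is in flight yet the sent queue is not empty, zero
	 * the flight counters and recount them straight from the sent
	 * queue, then take one more pass (done_once) to restart timers.
	 */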
4866	/*********************************************/
4867	/* Here we perform PR-SCTP procedures        */
4868	/* (section 4.2)                             */
4869	/*********************************************/
4870	/* C1. update advancedPeerAckPoint */
4871	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4872		asoc->advanced_peer_ack_point = cum_ack;
4873	}
4874	/* C2. try to further move advancedPeerAckPoint ahead */
4875	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4876		struct sctp_tmit_chunk *lchk;
4877		uint32_t old_adv_peer_ack_point;
4878
4879		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4880		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4881		/* C3. See if we need to send a Fwd-TSN */
4882		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4883			/*
4884			 * ISSUE with ECN, see FWD-TSN processing.
4885			 */
4886			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4887				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4888				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4889				    old_adv_peer_ack_point);
4890			}
4891			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4892				send_forward_tsn(stcb, asoc);
4893			} else if (lchk) {
4894				/* try to fast-retransmit FWD-TSNs that get lost too */
4895				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4896					send_forward_tsn(stcb, asoc);
4897				}
4898			}
4899		}
4900		if (lchk) {
4901			/* Ensure a timer is up */
4902			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4903			    stcb->sctp_ep, stcb, lchk->whoTo);
4904		}
4905	}
4906	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4907		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4908		    a_rwnd,
4909		    stcb->asoc.peers_rwnd,
4910		    stcb->asoc.total_flight,
4911		    stcb->asoc.total_output_queue_size);
4912	}
4913}
4914
4915void
4916sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4917{
4918	/* Copy cum-ack */
4919	uint32_t cum_ack, a_rwnd;
4920
4921	cum_ack = ntohl(cp->cumulative_tsn_ack);
4922	/* Arrange so a_rwnd does NOT change */
4923	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
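	/*
	 * The express handler recomputes peers_rwnd as (roughly)
	 * a_rwnd minus total_flight, so passing peers_rwnd + total_flight
	 * leaves the peer's window essentially where it was; a SHUTDOWN
	 * carries no window update of its own.
	 */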
4924
4925	/* Now call the express sack handling */
4926	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4927}
4928
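/*
 * After a FWD-TSN has moved a stream's delivery state forward, sweep
 * that stream's reorder queue: first hand up everything at or before
 * the last delivered sequence number, then resume normal in-order
 * delivery for anything that is now unblocked.
 */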
4929static void
4930sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4931    struct sctp_stream_in *strmin)
4932{
4933	struct sctp_queued_to_read *ctl, *nctl;
4934	struct sctp_association *asoc;
4935	uint16_t tt;
4936
4937	asoc = &stcb->asoc;
4938	tt = strmin->last_sequence_delivered;
4939	/*
4940	 * First deliver anything at or before the stream sequence number
4941	 * that came in.
4942	 */
4943	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4944		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4945			/* this is deliverable now */
4946			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4947			/* subtract pending on streams */
4948			asoc->size_on_all_streams -= ctl->length;
4949			sctp_ucount_decr(asoc->cnt_on_all_streams);
4950			/* deliver it to at least the delivery-q */
4951			if (stcb->sctp_socket) {
4952				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4953				sctp_add_to_readq(stcb->sctp_ep, stcb,
4954				    ctl,
4955				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4956			}
4957		} else {
4958			/* no more delivery now. */
4959			break;
4960		}
4961	}
4962	/*
4963	 * Now we must deliver things in the queue the normal way, if any
4964	 * are now ready.
4965	 */
4966	tt = strmin->last_sequence_delivered + 1;
4967	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4968		if (tt == ctl->sinfo_ssn) {
4969			/* this is deliverable now */
4970			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4971			/* subtract pending on streams */
4972			asoc->size_on_all_streams -= ctl->length;
4973			sctp_ucount_decr(asoc->cnt_on_all_streams);
4974			/* deliver it to at least the delivery-q */
4975			strmin->last_sequence_delivered = ctl->sinfo_ssn;
4976			if (stcb->sctp_socket) {
4977				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4978				sctp_add_to_readq(stcb->sctp_ep, stcb,
4979				    ctl,
4980				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4981
4982			}
4983			tt = strmin->last_sequence_delivered + 1;
4984		} else {
4985			break;
4986		}
4987	}
4988}
4989
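/*
 * Toss from the reassembly queue every fragment belonging to the given
 * stream/sequence pair: a FWD-TSN told us the peer has abandoned that
 * message, so any partial data for it can never complete.
 */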
4990static void
4991sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
4992    struct sctp_association *asoc,
4993    uint16_t stream, uint16_t seq)
4994{
4995	struct sctp_tmit_chunk *chk, *nchk;
4996
4997	/* For each one on here see if we need to toss it */
4998	/*
4999	 * For now, large messages held on the reasmqueue that are complete
5000	 * will be tossed too. We could in theory do more work: spin
5001	 * through, stop after dumping one message (i.e. on seeing the
5002	 * start of a new message at the head) and call the delivery
5003	 * function to see if it can be delivered. But for now we just
5004	 * dump everything on the queue.
5005	 */
5006	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5007		/*
5008		 * Do not toss it if it is on a different stream, or if it
5009		 * is marked for unordered delivery, in which case the
5010		 * stream sequence number has no meaning.
5011		 */
5012		if ((chk->rec.data.stream_number != stream) ||
5013		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5014			continue;
5015		}
5016		if (chk->rec.data.stream_seq == seq) {
5017			/* It needs to be tossed */
5018			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5019			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5020				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5021				asoc->str_of_pdapi = chk->rec.data.stream_number;
5022				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5023				asoc->fragment_flags = chk->rec.data.rcv_flags;
5024			}
5025			asoc->size_on_reasm_queue -= chk->send_size;
5026			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5027
5028			/* Clear up any stream problem */
5029			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5030			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5031				/*
5032				 * We must move this stream's sequence
5033				 * number forward when the chunk being
5034				 * skipped is not unordered. There is a
5035				 * chance that, if the peer does not
5036				 * include the last fragment in its FWD-TSN,
5037				 * we WILL have a problem, since we would
5038				 * have a partial chunk in the queue that
5039				 * may never become deliverable. Also, if
5040				 * a partial delivery API has started, the
5041				 * user may get a partial chunk, with the
5042				 * next read returning a new one... really
5043				 * ugly, but I see no way around it! Maybe a notify??
5044				 */
5045				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5046			}
5047			if (chk->data) {
5048				sctp_m_freem(chk->data);
5049				chk->data = NULL;
5050			}
5051			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5052		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5053			/*
5054			 * If the stream_seq is greater than the one being
5055			 * purged, we are done.
5056			 */
5057			break;
5058		}
5059	}
5060}
5061
5062
5063void
5064sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5065    struct sctp_forward_tsn_chunk *fwd,
5066    int *abort_flag, struct mbuf *m, int offset)
5067{
5068	/* The PR-SCTP FWD-TSN */
5069	/*
5070	 * Here we perform all the data receiver side steps for
5071	 * processing a FwdTSN, as required by the PR-SCTP draft.
5072	 *
5073	 * Assume we get FwdTSN(x):
5074	 * 1) update the local cumTSN to x
5075	 * 2) try to further advance the cumTSN to x + others we have
5076	 * 3) examine and update the re-ordering queues on the PR in-streams
5077	 * 4) clean up the re-assembly queue
5078	 * 5) send a SACK to report where we are
5079	 */
5080	struct sctp_association *asoc;
5081	uint32_t new_cum_tsn, gap;
5082	unsigned int i, fwd_sz, m_size;
5083	uint32_t str_seq;
5084	struct sctp_stream_in *strm;
5085	struct sctp_tmit_chunk *chk, *nchk;
5086	struct sctp_queued_to_read *ctl, *sv;
5087
5088	asoc = &stcb->asoc;
5089	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5090		SCTPDBG(SCTP_DEBUG_INDATA1,
5091		    "Bad size too small/big fwd-tsn\n");
5092		return;
5093	}
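	/* mapping_array_size is in bytes; << 3 gives the capacity in bits/TSNs. */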
5094	m_size = (stcb->asoc.mapping_array_size << 3);
5095	/*************************************************************/
5096	/* 1. Here we update local cumTSN and shift the bitmap array */
5097	/*************************************************************/
5098	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5099
5100	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5101		/* Already got there ... */
5102		return;
5103	}
5104	/*
5105	 * Now we know the new TSN is more advanced; find the actual
5106	 * gap.
5107	 */
5108	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5109	asoc->cumulative_tsn = new_cum_tsn;
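	/*
	 * Two cases when the gap falls outside the mapping array: a value
	 * further out than the rwnd we ever offered must be bogus/hostile,
	 * so abort; otherwise restart both maps from the new cumulative
	 * TSN. If the gap fits, just mark every TSN up to it as received.
	 */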
5110	if (gap >= m_size) {
5111		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5112			struct mbuf *op_err;
5113			char msg[SCTP_DIAG_INFO_LEN];
5114
5115			/*
5116			 * Out of range, even counting single-byte chunks in
5117			 * the rwnd we give out. This must be an attacker.
5118			 */
5119			*abort_flag = 1;
5120			snprintf(msg, sizeof(msg),
5121			    "New cum ack %8.8x too high, highest TSN %8.8x",
5122			    new_cum_tsn, asoc->highest_tsn_inside_map);
5123			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5124			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5125			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5126			return;
5127		}
5128		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5129
5130		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5131		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5132		asoc->highest_tsn_inside_map = new_cum_tsn;
5133
5134		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5135		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5136
5137		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5138			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5139		}
5140	} else {
5141		SCTP_TCB_LOCK_ASSERT(stcb);
5142		for (i = 0; i <= gap; i++) {
5143			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5144			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5145				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5146				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5147					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5148				}
5149			}
5150		}
5151	}
5152	/*************************************************************/
5153	/* 2. Clear up re-assembly queue                             */
5154	/*************************************************************/
5155	/*
5156	 * First service it if the pd-api is up, just in case we can
5157	 * progress it forward.
5158	 */
5159	if (asoc->fragmented_delivery_inprogress) {
5160		sctp_service_reassembly(stcb, asoc);
5161	}
5162	/* For each one on here see if we need to toss it */
5163	/*
5164	 * For now, large messages held on the reasmqueue that are complete
5165	 * will be tossed too. We could in theory do more work: spin
5166	 * through, stop after dumping one message (i.e. on seeing the
5167	 * start of a new message at the head) and call the delivery
5168	 * function to see if it can be delivered. But for now we just
5169	 * dump everything on the queue.
5170	 */
5171	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5172		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5173			/* It needs to be tossed */
5174			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5175			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5176				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5177				asoc->str_of_pdapi = chk->rec.data.stream_number;
5178				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5179				asoc->fragment_flags = chk->rec.data.rcv_flags;
5180			}
5181			asoc->size_on_reasm_queue -= chk->send_size;
5182			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5183
5184			/* Clear up any stream problem */
5185			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5186			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5187				/*
5188				 * We must move this stream's sequence
5189				 * number forward when the chunk being
5190				 * skipped is not unordered. There is a
5191				 * chance that, if the peer does not
5192				 * include the last fragment in its FWD-TSN,
5193				 * we WILL have a problem, since we would
5194				 * have a partial chunk in the queue that
5195				 * may never become deliverable. Also, if
5196				 * a partial delivery API has started, the
5197				 * user may get a partial chunk, with the
5198				 * next read returning a new one... really
5199				 * ugly, but I see no way around it! Maybe a notify??
5200				 */
5201				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5202			}
5203			if (chk->data) {
5204				sctp_m_freem(chk->data);
5205				chk->data = NULL;
5206			}
5207			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5208		} else {
5209			/*
5210			 * OK, we have gone beyond the end of the fwd-tsn's
5211			 * mark.
5212			 */
5213			break;
5214		}
5215	}
5216	/*******************************************************/
5217	/* 3. Update the PR-stream re-ordering queues and fix  */
5218	/* delivery issues as needed.                       */
5219	/*******************************************************/
5220	fwd_sz -= sizeof(*fwd);
5221	if (m && fwd_sz) {
5222		/* New method. */
5223		unsigned int num_str;
5224		struct sctp_strseq *stseq, strseqbuf;
5225
5226		offset += sizeof(*fwd);
5227
5228		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5229		num_str = fwd_sz / sizeof(struct sctp_strseq);
5230		for (i = 0; i < num_str; i++) {
5231			uint16_t st;
5232
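			/*
			 * sctp_m_getptr() returns a pointer into the mbuf
			 * when the record is contiguous, otherwise it copies
			 * the bytes into strseqbuf; NULL means the chain
			 * ran short.
			 */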
5233			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5234			    sizeof(struct sctp_strseq),
5235			    (uint8_t *) & strseqbuf);
5236			offset += sizeof(struct sctp_strseq);
5237			if (stseq == NULL) {
5238				break;
5239			}
5240			/* Convert to host byte order, in place. */
5241			st = ntohs(stseq->stream);
5242			stseq->stream = st;
5243			st = ntohs(stseq->sequence);
5244			stseq->sequence = st;
5245
5246			/* now process */
5247
5248			/*
5249			 * OK, we now look for the stream/seq on the read
5250			 * queue where it is not all delivered. If we find
5251			 * it, we transmute the read entry into a PDI_ABORTED.
5252			 */
5253			if (stseq->stream >= asoc->streamincnt) {
5254				/* screwed-up streams, stop! */
5255				break;
5256			}
5257			if ((asoc->str_of_pdapi == stseq->stream) &&
5258			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5259				/*
5260				 * If this is the one we were partially
5261				 * delivering, then we no longer are.
5262				 * Note this will change with the
5263				 * reassembly re-write.
5264				 */
5265				asoc->fragmented_delivery_inprogress = 0;
5266			}
5267			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5268			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5269				if ((ctl->sinfo_stream == stseq->stream) &&
5270				    (ctl->sinfo_ssn == stseq->sequence)) {
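					/* Pack stream and SSN into one word for the notify payload. */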
5271					str_seq = (stseq->stream << 16) | stseq->sequence;
5272					ctl->end_added = 1;
5273					ctl->pdapi_aborted = 1;
5274					sv = stcb->asoc.control_pdapi;
5275					stcb->asoc.control_pdapi = ctl;
5276					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5277					    stcb,
5278					    SCTP_PARTIAL_DELIVERY_ABORTED,
5279					    (void *)&str_seq,
5280					    SCTP_SO_NOT_LOCKED);
5281					stcb->asoc.control_pdapi = sv;
5282					break;
5283				} else if ((ctl->sinfo_stream == stseq->stream) &&
5284				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5285					/* We are past our victim SSN */
5286					break;
5287				}
5288			}
5289			strm = &asoc->strmin[stseq->stream];
5290			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5291				/* Update the sequence number */
5292				strm->last_sequence_delivered = stseq->sequence;
5293			}
5294			/* now kick the stream the new way */
5295			/* sa_ignore NO_NULL_CHK */
5296			sctp_kick_prsctp_reorder_queue(stcb, strm);
5297		}
5298		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5299	}
5300	/*
5301	 * Now slide things forward.
5302	 */
5303	sctp_slide_mapping_arrays(stcb);
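	/*
	 * The slide, when possible, advances mapping_array_base_tsn past
	 * TSNs already covered by the cumulative ack and compacts both
	 * bitmaps.
	 */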
5304
5305	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5306		/* now lets kick out and check for more fragmented delivery */
5307		/* sa_ignore NO_NULL_CHK */
5308		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5309	}
5310}
5311