/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 237049 2012-06-14 06:54:48Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is) so it can be bundled.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
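/*
 * Worked example of the calculation above (illustrative numbers only, not
 * taken from the original source): with a receive buffer limit of 64000
 * bytes, 2000 bytes held on the reassembly queue in 2 chunks, and nothing
 * queued on the streams, the rwnd is roughly
 * 64000 - (2000 + 2 * MSIZE) - my_rwnd_control_len; if the result drops
 * below my_rwnd_control_len it is clamped to 1 so that silly window
 * syndrome avoidance stays engaged.
 */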



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the queued chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
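/*
 * For illustration (not part of the original source): a user-space peer
 * with SCTP_RECVRCVINFO and SCTP_RECVNXTINFO enabled would see the mbuf
 * built above as a control chain of up to three cmsgs, e.g.
 *
 *   cmsg_level=IPPROTO_SCTP, cmsg_type=SCTP_RCVINFO  (struct sctp_rcvinfo)
 *   cmsg_level=IPPROTO_SCTP, cmsg_type=SCTP_NXTINFO  (struct sctp_nxtinfo)
 *   cmsg_level=IPPROTO_SCTP, cmsg_type=SCTP_SNDRCV   (struct sctp_sndrcvinfo)
 *
 * which can be walked with CMSG_FIRSTHDR()/CMSG_NXTHDR() on the msghdr
 * returned by recvmsg().
 */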


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
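/*
 * Example of the gap arithmetic used above (illustrative numbers only):
 * with mapping_array_base_tsn = 1000, TSN 1005 maps to gap 5, i.e. bit 5
 * of the mapping arrays; SCTP_CALC_TSN_TO_GAP also handles the case where
 * the TSN has wrapped around 2^32 relative to the base.
 */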


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going away.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * This is an ordered chunk but it is not the next
			 * sequence to deliver in its stream
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy; find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one; insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy; should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end; insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue. And you get a 1
 * back if all of the message is ready, or a 0 back if the message is
 * still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
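/*
 * Example (illustrative TSNs only): a reassembly queue holding fragments
 * with TSN 10 (FIRST), 11 (MIDDLE) and 12 (LAST) makes this return 1 with
 * *t_size equal to the sum of the three send_size values; if TSN 12 were
 * still missing, the walk would fall off the sequential run and return 0
 * with *t_size holding only the deliverable prefix.
 */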

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver,
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery; could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
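/*
 * Note on pd_point above (illustrative numbers, not from the original
 * source): with a partial_delivery_point of 4096 and a socket buffer
 * limit of 65536, partial delivery starts once at least 4096 bytes of the
 * first fragmented message are deliverable, or earlier if the whole
 * message is already on the reassembly queue.
 */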

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or a
				 * MIDDLE fragment, NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one; insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy;
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end; insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here;
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here;
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be a MIDDLE or FIRST,
				 * NOT a LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here;
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here;
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last; it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * the new chunk need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
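/*
 * Adjacency rules above, by example (illustrative TSNs only): if the
 * queue holds a MIDDLE fragment at TSN 20, an arriving TSN 21 is flagged
 * as not belonging unless the queued fragment at 20 is a LAST, and an
 * arriving TSN 19 is flagged unless the fragment at 20 is a FIRST.
 */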


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1457	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1458		/* It is a duplicate */
1459		SCTP_STAT_INCR(sctps_recvdupdata);
1460		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1461			/* Record a dup for the next outbound sack */
1462			asoc->dup_tsns[asoc->numduptsns] = tsn;
1463			asoc->numduptsns++;
1464		}
1465		asoc->send_sack = 1;
1466		return (0);
1467	}
1468	/* Calculate the number of TSN's between the base and this TSN */
1469	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1470	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1471		/* Can't hold the bit in the mapping at max array, toss it */
1472		return (0);
1473	}
1474	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1475		SCTP_TCB_LOCK_ASSERT(stcb);
1476		if (sctp_expand_mapping_array(asoc, gap)) {
1477			/* Can't expand, drop it */
1478			return (0);
1479		}
1480	}
1481	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1482		*high_tsn = tsn;
1483	}
1484	/* See if we have received this one already */
1485	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1486	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1487		SCTP_STAT_INCR(sctps_recvdupdata);
1488		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1489			/* Record a dup for the next outbound sack */
1490			asoc->dup_tsns[asoc->numduptsns] = tsn;
1491			asoc->numduptsns++;
1492		}
1493		asoc->send_sack = 1;
1494		return (0);
1495	}
1496	/*
1497	 * Check to see about the GONE flag, duplicates would cause a sack
1498	 * to be sent up above
1499	 */
1500	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1501	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1502	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1503	    ) {
1504		/*
1505		 * wait a minute, this guy is gone, there is no longer a
1506		 * receiver. Send peer an ABORT!
1507		 */
1508		struct mbuf *op_err;
1509
1510		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1511		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1512		*abort_flag = 1;
1513		return (0);
1514	}
1515	/*
1516	 * Now before going further we see if there is room. If NOT then we
1517	 * MAY let one through only IF this TSN is the one we are waiting
1518	 * for on a partial delivery API.
1519	 */
1520
1521	/* now do the tests */
1522	if (((asoc->cnt_on_all_streams +
1523	    asoc->cnt_on_reasm_queue +
1524	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1525	    (((int)asoc->my_rwnd) <= 0)) {
1526		/*
1527		 * When we have NO room in the rwnd we check to make sure
1528		 * the reader is doing its job...
1529		 */
1530		if (stcb->sctp_socket->so_rcv.sb_cc) {
1531			/* some to read, wake-up */
1532#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1533			struct socket *so;
1534
1535			so = SCTP_INP_SO(stcb->sctp_ep);
1536			atomic_add_int(&stcb->asoc.refcnt, 1);
1537			SCTP_TCB_UNLOCK(stcb);
1538			SCTP_SOCKET_LOCK(so, 1);
1539			SCTP_TCB_LOCK(stcb);
1540			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1541			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1542				/* assoc was freed while we were unlocked */
1543				SCTP_SOCKET_UNLOCK(so, 1);
1544				return (0);
1545			}
1546#endif
1547			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1548#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1549			SCTP_SOCKET_UNLOCK(so, 1);
1550#endif
1551		}
1552		/* now is it in the mapping array of what we have accepted? */
1553		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1554		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1555			/* Nope not in the valid range dump it */
1556			sctp_set_rwnd(stcb, asoc);
1557			if ((asoc->cnt_on_all_streams +
1558			    asoc->cnt_on_reasm_queue +
1559			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1560				SCTP_STAT_INCR(sctps_datadropchklmt);
1561			} else {
1562				SCTP_STAT_INCR(sctps_datadroprwnd);
1563			}
1564			*break_flag = 1;
1565			return (0);
1566		}
1567	}
1568	strmno = ntohs(ch->dp.stream_id);
1569	if (strmno >= asoc->streamincnt) {
1570		struct sctp_paramhdr *phdr;
1571		struct mbuf *mb;
1572
1573		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1574		    0, M_DONTWAIT, 1, MT_DATA);
1575		if (mb != NULL) {
1576			/* add some space up front so prepend will work well */
1577			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1578			phdr = mtod(mb, struct sctp_paramhdr *);
1579			/*
1580			 * Error causes are just params, and this one has
1581			 * two back-to-back phdrs: one with the error type
1582			 * and size, the other with the stream id and a reserved field
1583			 */
1584			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1585			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1586			phdr->param_length =
1587			    htons(sizeof(struct sctp_paramhdr) * 2);
1588			phdr++;
1589			/* We insert the stream in the type field */
1590			phdr->param_type = ch->dp.stream_id;
1591			/* And set the length to 0 for the rsvd field */
1592			phdr->param_length = 0;
1593			sctp_queue_op_err(stcb, mb);
1594		}
1595		SCTP_STAT_INCR(sctps_badsid);
1596		SCTP_TCB_LOCK_ASSERT(stcb);
1597		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1598		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1599			asoc->highest_tsn_inside_nr_map = tsn;
1600		}
1601		if (tsn == (asoc->cumulative_tsn + 1)) {
1602			/* Update cum-ack */
1603			asoc->cumulative_tsn = tsn;
1604		}
1605		return (0);
1606	}
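	/*
	 * Wire layout of the error cause queued above (an illustrative
	 * sketch; the field names are not from the sources): two
	 * back-to-back parameter headers forming
	 *
	 *	uint16_t cause_code;	SCTP_CAUSE_INVALID_STREAM
	 *	uint16_t cause_length;	2 * sizeof(struct sctp_paramhdr)
	 *	uint16_t stream_id;	the offending SID, as received
	 *	uint16_t reserved;	always 0
	 */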
1607	/*
1608	 * Before we continue, let's validate that we are not being fooled by
1609	 * an evil attacker. We can only have 4k chunks, based on the TSN
1610	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1611	 * way our stream sequence numbers could have wrapped. We of course
1612	 * only validate the FIRST fragment, so the bit must be set.
1613	 */
1614	strmseq = ntohs(ch->dp.stream_sequence);
1615#ifdef SCTP_ASOCLOG_OF_TSNS
1616	SCTP_TCB_LOCK_ASSERT(stcb);
1617	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1618		asoc->tsn_in_at = 0;
1619		asoc->tsn_in_wrapped = 1;
1620	}
1621	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1622	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1623	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1624	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1625	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1626	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1627	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1628	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1629	asoc->tsn_in_at++;
1630#endif
1631	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1632	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1633	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1634	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1635		/* The incoming sseq is behind where we last delivered? */
1636		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1637		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1638		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1639		    0, M_DONTWAIT, 1, MT_DATA);
1640		if (oper) {
1641			struct sctp_paramhdr *ph;
1642			uint32_t *ippp;
1643
1644			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1645			    (3 * sizeof(uint32_t));
1646			ph = mtod(oper, struct sctp_paramhdr *);
1647			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1648			ph->param_length = htons(SCTP_BUF_LEN(oper));
1649			ippp = (uint32_t *) (ph + 1);
1650			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1651			ippp++;
1652			*ippp = tsn;
1653			ippp++;
1654			*ippp = ((strmno << 16) | strmseq);
1655
1656		}
1657		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1658		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1659		*abort_flag = 1;
1660		return (0);
1661	}
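	/*
	 * Sketch of the diagnostic payload built above (illustrative): the
	 * PROTOCOL_VIOLATION cause carries three 32-bit words after the
	 * parameter header,
	 *
	 *	word 0: SCTP_FROM_SCTP_INDATA + SCTP_LOC_14  (abort location)
	 *	word 1: the offending TSN
	 *	word 2: (strmno << 16) | strmseq
	 *
	 * which lets a packet trace pinpoint where and why we aborted.
	 */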
1662	/************************************
1663	 * From here down we may find ch-> invalid
1664	 * so it's a good idea NOT to use it.
1665	 *************************************/
1666
1667	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1668	if (last_chunk == 0) {
1669		dmbuf = SCTP_M_COPYM(*m,
1670		    (offset + sizeof(struct sctp_data_chunk)),
1671		    the_len, M_DONTWAIT);
1672#ifdef SCTP_MBUF_LOGGING
1673		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1674			struct mbuf *mat;
1675
1676			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1677				if (SCTP_BUF_IS_EXTENDED(mat)) {
1678					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1679				}
1680			}
1681		}
1682#endif
1683	} else {
1684		/* We can steal the last chunk */
1685		int l_len;
1686
1687		dmbuf = *m;
1688		/* lop off the top part */
1689		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1690		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1691			l_len = SCTP_BUF_LEN(dmbuf);
1692		} else {
1693			/*
1694			 * need to count up the size; hopefully we do not
1695			 * hit this too often :-0
1696			 */
1697			struct mbuf *lat;
1698
1699			l_len = 0;
1700			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1701				l_len += SCTP_BUF_LEN(lat);
1702			}
1703		}
1704		if (l_len > the_len) {
1705			/* Trim the trailing rounding bytes off too */
1706			m_adj(dmbuf, -(l_len - the_len));
1707		}
1708	}
1709	if (dmbuf == NULL) {
1710		SCTP_STAT_INCR(sctps_nomem);
1711		return (0);
1712	}
1713	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1714	    asoc->fragmented_delivery_inprogress == 0 &&
1715	    TAILQ_EMPTY(&asoc->resetHead) &&
1716	    ((ordered == 0) ||
1717	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1718	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1719		/* Candidate for express delivery */
1720		/*
1721		 * It's not fragmented, no PD-API is up, nothing is in the
1722		 * delivery queue, it's un-ordered OR ordered and the next to
1723		 * deliver AND nothing else is stuck on the stream queue,
1724		 * and there is room for it in the socket buffer. Let's just
1725		 * stuff it up the buffer....
1726		 */
1727
1728		/* It would be nice to avoid this copy if we could :< */
1729		sctp_alloc_a_readq(stcb, control);
1730		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1731		    protocol_id,
1732		    stcb->asoc.context,
1733		    strmno, strmseq,
1734		    chunk_flags,
1735		    dmbuf);
1736		if (control == NULL) {
1737			goto failed_express_del;
1738		}
1739		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1740		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1741			asoc->highest_tsn_inside_nr_map = tsn;
1742		}
1743		sctp_add_to_readq(stcb->sctp_ep, stcb,
1744		    control, &stcb->sctp_socket->so_rcv,
1745		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1746
1747		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1748			/* for ordered, bump what we delivered */
1749			asoc->strmin[strmno].last_sequence_delivered++;
1750		}
1751		SCTP_STAT_INCR(sctps_recvexpress);
1752		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1753			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1754			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1755		}
1756		control = NULL;
1757
1758		goto finish_express_del;
1759	}
1760failed_express_del:
1761	/* If we reach here this is a new chunk */
1762	chk = NULL;
1763	control = NULL;
1764	/* Express for fragmented delivery? */
1765	if ((asoc->fragmented_delivery_inprogress) &&
1766	    (stcb->asoc.control_pdapi) &&
1767	    (asoc->str_of_pdapi == strmno) &&
1768	    (asoc->ssn_of_pdapi == strmseq)
1769	    ) {
1770		control = stcb->asoc.control_pdapi;
1771		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1772			/* Can't be another first? */
1773			goto failed_pdapi_express_del;
1774		}
1775		if (tsn == (control->sinfo_tsn + 1)) {
1776			/* Yep, we can add it on */
1777			int end = 0;
1778
1779			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1780				end = 1;
1781			}
1782			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1783			    tsn,
1784			    &stcb->sctp_socket->so_rcv)) {
1785				SCTP_PRINTF("Append fails end:%d\n", end);
1786				goto failed_pdapi_express_del;
1787			}
1788			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1789			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1790				asoc->highest_tsn_inside_nr_map = tsn;
1791			}
1792			SCTP_STAT_INCR(sctps_recvexpressm);
1793			control->sinfo_tsn = tsn;
1794			asoc->tsn_last_delivered = tsn;
1795			asoc->fragment_flags = chunk_flags;
1796			asoc->tsn_of_pdapi_last_delivered = tsn;
1797			asoc->last_flags_delivered = chunk_flags;
1798			asoc->last_strm_seq_delivered = strmseq;
1799			asoc->last_strm_no_delivered = strmno;
1800			if (end) {
1801				/* clean up the flags and such */
1802				asoc->fragmented_delivery_inprogress = 0;
1803				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1804					asoc->strmin[strmno].last_sequence_delivered++;
1805				}
1806				stcb->asoc.control_pdapi = NULL;
1807				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1808					/*
1809					 * There could be another message
1810					 * ready
1811					 */
1812					need_reasm_check = 1;
1813				}
1814			}
1815			control = NULL;
1816			goto finish_express_del;
1817		}
1818	}
1819failed_pdapi_express_del:
1820	control = NULL;
1821	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1822		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1823		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1824			asoc->highest_tsn_inside_nr_map = tsn;
1825		}
1826	} else {
1827		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1828		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1829			asoc->highest_tsn_inside_map = tsn;
1830		}
1831	}
1832	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1833		sctp_alloc_a_chunk(stcb, chk);
1834		if (chk == NULL) {
1835			/* No memory so we drop the chunk */
1836			SCTP_STAT_INCR(sctps_nomem);
1837			if (last_chunk == 0) {
1838				/* we copied it, free the copy */
1839				sctp_m_freem(dmbuf);
1840			}
1841			return (0);
1842		}
1843		chk->rec.data.TSN_seq = tsn;
1844		chk->no_fr_allowed = 0;
1845		chk->rec.data.stream_seq = strmseq;
1846		chk->rec.data.stream_number = strmno;
1847		chk->rec.data.payloadtype = protocol_id;
1848		chk->rec.data.context = stcb->asoc.context;
1849		chk->rec.data.doing_fast_retransmit = 0;
1850		chk->rec.data.rcv_flags = chunk_flags;
1851		chk->asoc = asoc;
1852		chk->send_size = the_len;
1853		chk->whoTo = net;
1854		atomic_add_int(&net->ref_count, 1);
1855		chk->data = dmbuf;
1856	} else {
1857		sctp_alloc_a_readq(stcb, control);
1858		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1859		    protocol_id,
1860		    stcb->asoc.context,
1861		    strmno, strmseq,
1862		    chunk_flags,
1863		    dmbuf);
1864		if (control == NULL) {
1865			/* No memory so we drop the chunk */
1866			SCTP_STAT_INCR(sctps_nomem);
1867			if (last_chunk == 0) {
1868				/* we copied it, free the copy */
1869				sctp_m_freem(dmbuf);
1870			}
1871			return (0);
1872		}
1873		control->length = the_len;
1874	}
1875
1876	/* Mark it as received */
1877	/* Now queue it where it belongs */
1878	if (control != NULL) {
1879		/* First a sanity check */
1880		if (asoc->fragmented_delivery_inprogress) {
1881			/*
1882			 * Ok, we have a fragmented delivery in progress; if
1883			 * this chunk is next to deliver OR belongs, in our
1884			 * view, in the reassembly queue, the peer is evil
1885			 * or broken.
1886			 */
1887			uint32_t estimate_tsn;
1888
1889			estimate_tsn = asoc->tsn_last_delivered + 1;
1890			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1891			    (estimate_tsn == control->sinfo_tsn)) {
1892				/* Evil/Broke peer */
1893				sctp_m_freem(control->data);
1894				control->data = NULL;
1895				if (control->whoFrom) {
1896					sctp_free_remote_addr(control->whoFrom);
1897					control->whoFrom = NULL;
1898				}
1899				sctp_free_a_readq(stcb, control);
1900				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1901				    0, M_DONTWAIT, 1, MT_DATA);
1902				if (oper) {
1903					struct sctp_paramhdr *ph;
1904					uint32_t *ippp;
1905
1906					SCTP_BUF_LEN(oper) =
1907					    sizeof(struct sctp_paramhdr) +
1908					    (3 * sizeof(uint32_t));
1909					ph = mtod(oper, struct sctp_paramhdr *);
1910					ph->param_type =
1911					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1912					ph->param_length = htons(SCTP_BUF_LEN(oper));
1913					ippp = (uint32_t *) (ph + 1);
1914					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1915					ippp++;
1916					*ippp = tsn;
1917					ippp++;
1918					*ippp = ((strmno << 16) | strmseq);
1919				}
1920				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1921				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1922				*abort_flag = 1;
1923				return (0);
1924			} else {
1925				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1926					sctp_m_freem(control->data);
1927					control->data = NULL;
1928					if (control->whoFrom) {
1929						sctp_free_remote_addr(control->whoFrom);
1930						control->whoFrom = NULL;
1931					}
1932					sctp_free_a_readq(stcb, control);
1933
1934					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1935					    0, M_DONTWAIT, 1, MT_DATA);
1936					if (oper) {
1937						struct sctp_paramhdr *ph;
1938						uint32_t *ippp;
1939
1940						SCTP_BUF_LEN(oper) =
1941						    sizeof(struct sctp_paramhdr) +
1942						    (3 * sizeof(uint32_t));
1943						ph = mtod(oper,
1944						    struct sctp_paramhdr *);
1945						ph->param_type =
1946						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1947						ph->param_length =
1948						    htons(SCTP_BUF_LEN(oper));
1949						ippp = (uint32_t *) (ph + 1);
1950						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1951						ippp++;
1952						*ippp = tsn;
1953						ippp++;
1954						*ippp = ((strmno << 16) | strmseq);
1955					}
1956					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1957					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1958					*abort_flag = 1;
1959					return (0);
1960				}
1961			}
1962		} else {
1963			/* No PDAPI running */
1964			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1965				/*
1966				 * Reassembly queue is NOT empty; validate
1967				 * that this tsn does not need to be in the
1968				 * reassembly queue. If it does, then our peer
1969				 * is broken or evil.
1970				 */
1971				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1972					sctp_m_freem(control->data);
1973					control->data = NULL;
1974					if (control->whoFrom) {
1975						sctp_free_remote_addr(control->whoFrom);
1976						control->whoFrom = NULL;
1977					}
1978					sctp_free_a_readq(stcb, control);
1979					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1980					    0, M_DONTWAIT, 1, MT_DATA);
1981					if (oper) {
1982						struct sctp_paramhdr *ph;
1983						uint32_t *ippp;
1984
1985						SCTP_BUF_LEN(oper) =
1986						    sizeof(struct sctp_paramhdr) +
1987						    (3 * sizeof(uint32_t));
1988						ph = mtod(oper,
1989						    struct sctp_paramhdr *);
1990						ph->param_type =
1991						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1992						ph->param_length =
1993						    htons(SCTP_BUF_LEN(oper));
1994						ippp = (uint32_t *) (ph + 1);
1995						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1996						ippp++;
1997						*ippp = tsn;
1998						ippp++;
1999						*ippp = ((strmno << 16) | strmseq);
2000					}
2001					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2002					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2003					*abort_flag = 1;
2004					return (0);
2005				}
2006			}
2007		}
2008		/* ok, if we reach here we have passed the sanity checks */
2009		if (chunk_flags & SCTP_DATA_UNORDERED) {
2010			/* queue directly into socket buffer */
2011			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2012			sctp_add_to_readq(stcb->sctp_ep, stcb,
2013			    control,
2014			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2015		} else {
2016			/*
2017			 * Special check for when streams are resetting. We
2018			 * could be more smart about this and check the
2019			 * actual stream to see if it is not being reset..
2020			 * that way we would not create a HOLB when amongst
2021			 * streams being reset and those not being reset.
2022			 *
2023			 * We take complete messages that have a stream reset
2024			 * intervening (aka the TSN is after where our
2025			 * cum-ack needs to be) off and put them on a
2026			 * pending_reply_queue. The reassembly ones we do
2027			 * not have to worry about since they are all sorted
2028			 * and proceessed by TSN order. It is only the
2029			 * singletons I must worry about.
2030			 */
2031			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2032			    SCTP_TSN_GT(tsn, liste->tsn)) {
2033				/*
2034				 * yep, it's past where we need to reset... go
2035				 * ahead and queue it.
2036				 */
2037				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2038					/* first one on */
2039					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2040				} else {
2041					struct sctp_queued_to_read *ctlOn,
2042					                   *nctlOn;
2043					unsigned char inserted = 0;
2044
2045					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2046						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2047							continue;
2048						} else {
2049							/* found it */
2050							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2051							inserted = 1;
2052							break;
2053						}
2054					}
2055					if (inserted == 0) {
2056						/*
2057					 * not inserted before any
2058					 * existing entry, so it
2059					 * must go at the end.
2060						 */
2061						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2062					}
2063				}
2064			} else {
2065				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2066				if (*abort_flag) {
2067					return (0);
2068				}
2069			}
2070		}
2071	} else {
2072		/* Into the re-assembly queue */
2073		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2074		if (*abort_flag) {
2075			/*
2076			 * the assoc is now gone and chk was put onto the
2077			 * reasm queue, which has all been freed.
2078			 */
2079			*m = NULL;
2080			return (0);
2081		}
2082	}
2083finish_express_del:
2084	if (tsn == (asoc->cumulative_tsn + 1)) {
2085		/* Update cum-ack */
2086		asoc->cumulative_tsn = tsn;
2087	}
2088	if (last_chunk) {
2089		*m = NULL;
2090	}
2091	if (ordered) {
2092		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2093	} else {
2094		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2095	}
2096	SCTP_STAT_INCR(sctps_recvdata);
2097	/* Set it present please */
2098	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2099		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2100	}
2101	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2102		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2103		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2104	}
2105	/* check the special flag for stream resets */
2106	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2107	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2108		/*
2109		 * we have finished working through the backlogged TSNs; now
2110		 * it is time to reset streams. 1: call the reset function. 2:
2111		 * free the pending_reply space. 3: distribute any chunks in
2112		 * the pending_reply_queue.
2113		 */
2114		struct sctp_queued_to_read *ctl, *nctl;
2115
2116		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2117		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2118		SCTP_FREE(liste, SCTP_M_STRESET);
2119		/* sa_ignore FREED_MEMORY */
2120		liste = TAILQ_FIRST(&asoc->resetHead);
2121		if (TAILQ_EMPTY(&asoc->resetHead)) {
2122			/* All can be removed */
2123			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2124				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2125				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2126				if (*abort_flag) {
2127					return (0);
2128				}
2129			}
2130		} else {
2131			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2132				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2133					break;
2134				}
2135				/*
2136				 * if ctl->sinfo_tsn is <= liste->tsn we can
2137				 * process it, which is the negation of
2138				 * ctl->sinfo_tsn > liste->tsn.
2139				 */
2140				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2141				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2142				if (*abort_flag) {
2143					return (0);
2144				}
2145			}
2146		}
2147		/*
2148		 * Now service reassembly to pick up anything that has been
2149		 * held on the reassembly queue.
2150		 */
2151		sctp_deliver_reasm_check(stcb, asoc);
2152		need_reasm_check = 0;
2153	}
2154	if (need_reasm_check) {
2155		/* Another one waits ? */
2156		sctp_deliver_reasm_check(stcb, asoc);
2157	}
2158	return (1);
2159}
2160
2161int8_t sctp_map_lookup_tab[256] = {
2162	0, 1, 0, 2, 0, 1, 0, 3,
2163	0, 1, 0, 2, 0, 1, 0, 4,
2164	0, 1, 0, 2, 0, 1, 0, 3,
2165	0, 1, 0, 2, 0, 1, 0, 5,
2166	0, 1, 0, 2, 0, 1, 0, 3,
2167	0, 1, 0, 2, 0, 1, 0, 4,
2168	0, 1, 0, 2, 0, 1, 0, 3,
2169	0, 1, 0, 2, 0, 1, 0, 6,
2170	0, 1, 0, 2, 0, 1, 0, 3,
2171	0, 1, 0, 2, 0, 1, 0, 4,
2172	0, 1, 0, 2, 0, 1, 0, 3,
2173	0, 1, 0, 2, 0, 1, 0, 5,
2174	0, 1, 0, 2, 0, 1, 0, 3,
2175	0, 1, 0, 2, 0, 1, 0, 4,
2176	0, 1, 0, 2, 0, 1, 0, 3,
2177	0, 1, 0, 2, 0, 1, 0, 7,
2178	0, 1, 0, 2, 0, 1, 0, 3,
2179	0, 1, 0, 2, 0, 1, 0, 4,
2180	0, 1, 0, 2, 0, 1, 0, 3,
2181	0, 1, 0, 2, 0, 1, 0, 5,
2182	0, 1, 0, 2, 0, 1, 0, 3,
2183	0, 1, 0, 2, 0, 1, 0, 4,
2184	0, 1, 0, 2, 0, 1, 0, 3,
2185	0, 1, 0, 2, 0, 1, 0, 6,
2186	0, 1, 0, 2, 0, 1, 0, 3,
2187	0, 1, 0, 2, 0, 1, 0, 4,
2188	0, 1, 0, 2, 0, 1, 0, 3,
2189	0, 1, 0, 2, 0, 1, 0, 5,
2190	0, 1, 0, 2, 0, 1, 0, 3,
2191	0, 1, 0, 2, 0, 1, 0, 4,
2192	0, 1, 0, 2, 0, 1, 0, 3,
2193	0, 1, 0, 2, 0, 1, 0, 8
2194};
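/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits in val
 * counting up from bit 0, i.e. how far the cum-ack can advance inside a
 * partially filled mapping-array byte. A few spot checks (illustrative):
 *
 *	val = 0x17 (00010111): bits 0-2 set, bit 3 clear -> tab[0x17] == 3
 *	val = 0x16 (00010110): bit 0 clear               -> tab[0x16] == 0
 *	val = 0xff (11111111): all eight set             -> tab[0xff] == 8
 */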
2195
2196
2197void
2198sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2199{
2200	/*
2201	 * Now we also need to check the mapping array in a couple of ways.
2202	 * 1) Did we move the cum-ack point?
2203	 *
2204	 * When you first glance at this you might think that all entries that
2205	 * make up the position of the cum-ack would be in the nr-mapping
2206	 * array only... i.e. things up to the cum-ack are always
2207	 * deliverable. That's true with one exception: when it's a fragmented
2208	 * message, we may not deliver the data until some threshold (or all
2209	 * of it) is in place. So we must OR the nr_mapping_array and
2210	 * mapping_array to get a true picture of the cum-ack.
2211	 */
2212	struct sctp_association *asoc;
2213	int at;
2214	uint8_t val;
2215	int slide_from, slide_end, lgap, distance;
2216	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2217
2218	asoc = &stcb->asoc;
2219
2220	old_cumack = asoc->cumulative_tsn;
2221	old_base = asoc->mapping_array_base_tsn;
2222	old_highest = asoc->highest_tsn_inside_map;
2223	/*
2224	 * We could probably improve this a small bit by calculating the
2225	 * offset of the current cum-ack as the starting point.
2226	 */
2227	at = 0;
2228	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2229		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2230		if (val == 0xff) {
2231			at += 8;
2232		} else {
2233			/* there is a 0 bit */
2234			at += sctp_map_lookup_tab[val];
2235			break;
2236		}
2237	}
2238	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
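	/*
	 * Example of the scan above (a sketch): with the OR'd bytes
	 * { 0xff, 0xff, 0x07, ... } the first two bytes add 16 to 'at',
	 * the third adds sctp_map_lookup_tab[0x07] == 3, so at == 19 and
	 * the new cumulative_tsn is mapping_array_base_tsn + 18.
	 */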
2239
2240	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2241	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2242#ifdef INVARIANTS
2243		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2244		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2245#else
2246		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2247		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2248		sctp_print_mapping_array(asoc);
2249		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2250			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2251		}
2252		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2253		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2254#endif
2255	}
2256	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2257		highest_tsn = asoc->highest_tsn_inside_nr_map;
2258	} else {
2259		highest_tsn = asoc->highest_tsn_inside_map;
2260	}
2261	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2262		/* The complete array was completed by a single FR */
2263		/* highest becomes the cum-ack */
2264		int clr;
2265
2266#ifdef INVARIANTS
2267		unsigned int i;
2268
2269#endif
2270
2271		/* clear the array */
2272		clr = ((at + 7) >> 3);
2273		if (clr > asoc->mapping_array_size) {
2274			clr = asoc->mapping_array_size;
2275		}
2276		memset(asoc->mapping_array, 0, clr);
2277		memset(asoc->nr_mapping_array, 0, clr);
2278#ifdef INVARIANTS
2279		for (i = 0; i < asoc->mapping_array_size; i++) {
2280			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2281				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2282				sctp_print_mapping_array(asoc);
2283			}
2284		}
2285#endif
2286		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2287		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2288	} else if (at >= 8) {
2289		/* we can slide the mapping array down */
2290		/* slide_from holds where we hit the first NON 0xff byte */
2291
2292		/*
2293		 * now calculate the ceiling of the move using our highest
2294		 * TSN value
2295		 */
2296		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2297		slide_end = (lgap >> 3);
2298		if (slide_end < slide_from) {
2299			sctp_print_mapping_array(asoc);
2300#ifdef INVARIANTS
2301			panic("impossible slide");
2302#else
2303			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2304			    lgap, slide_end, slide_from, at);
2305			return;
2306#endif
2307		}
2308		if (slide_end > asoc->mapping_array_size) {
2309#ifdef INVARIANTS
2310			panic("would overrun buffer");
2311#else
2312			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2313			    asoc->mapping_array_size, slide_end);
2314			slide_end = asoc->mapping_array_size;
2315#endif
2316		}
2317		distance = (slide_end - slide_from) + 1;
2318		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2319			sctp_log_map(old_base, old_cumack, old_highest,
2320			    SCTP_MAP_PREPARE_SLIDE);
2321			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2322			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2323		}
2324		if (distance + slide_from > asoc->mapping_array_size ||
2325		    distance < 0) {
2326			/*
2327			 * Here we do NOT slide forward the array so that
2328			 * hopefully when more data comes in to fill it up
2329			 * we will be able to slide it forward. Really I
2330			 * don't think this should happen :-0
2331			 */
2332
2333			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2334				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2335				    (uint32_t) asoc->mapping_array_size,
2336				    SCTP_MAP_SLIDE_NONE);
2337			}
2338		} else {
2339			int ii;
2340
2341			for (ii = 0; ii < distance; ii++) {
2342				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2343				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2344
2345			}
2346			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2347				asoc->mapping_array[ii] = 0;
2348				asoc->nr_mapping_array[ii] = 0;
2349			}
2350			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2351				asoc->highest_tsn_inside_map += (slide_from << 3);
2352			}
2353			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2354				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2355			}
2356			asoc->mapping_array_base_tsn += (slide_from << 3);
2357			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2358				sctp_log_map(asoc->mapping_array_base_tsn,
2359				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2360				    SCTP_MAP_SLIDE_RESULT);
2361			}
2362		}
2363	}
2364}
2365
2366void
2367sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2368{
2369	struct sctp_association *asoc;
2370	uint32_t highest_tsn;
2371
2372	asoc = &stcb->asoc;
2373	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2374		highest_tsn = asoc->highest_tsn_inside_nr_map;
2375	} else {
2376		highest_tsn = asoc->highest_tsn_inside_map;
2377	}
2378
2379	/*
2380	 * Now we need to see if we need to queue a sack or just start the
2381	 * timer (if allowed).
2382	 */
2383	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2384		/*
2385		 * Ok, special case: in the SHUTDOWN-SENT state we make
2386		 * sure the SACK timer is off and instead send a SHUTDOWN
2387		 * and a SACK.
2388		 */
2389		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2390			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2391			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2392		}
2393		sctp_send_shutdown(stcb,
2394		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2395		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2396	} else {
2397		int is_a_gap;
2398
2399		/* is there a gap now ? */
2400		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2401
2402		/*
2403		 * CMT DAC algorithm: increase number of packets received
2404		 * since last ack
2405		 */
2406		stcb->asoc.cmt_dac_pkts_rcvd++;
2407
2408		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2409							 * SACK */
2410		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2411							 * longer is one */
2412		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2413		    (is_a_gap) ||	/* is still a gap */
2414		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2415		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2416		    ) {
2417
2418			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2419			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2420			    (stcb->asoc.send_sack == 0) &&
2421			    (stcb->asoc.numduptsns == 0) &&
2422			    (stcb->asoc.delayed_ack) &&
2423			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2424
2425				/*
2426				 * CMT DAC algorithm: With CMT, delay acks
2427				 * even in the face of reordering.
2428				 *
2429				 * Therefore, acks that do not have to be
2430				 * sent because of the above reasons will be
2431				 * delayed. That is, acks that would have
2432				 * been sent due to gap reports will be
2433				 * delayed with DAC. Start the delayed ack
2434				 * timer.
2435				 */
2436				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2437				    stcb->sctp_ep, stcb, NULL);
2438			} else {
2439				/*
2440				 * Ok we must build a SACK since the timer
2441				 * is pending, we got our first packet OR
2442				 * there are gaps or duplicates.
2443				 */
2444				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2445				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2446			}
2447		} else {
2448			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2449				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2450				    stcb->sctp_ep, stcb, NULL);
2451			}
2452		}
2453	}
2454}
2455
2456void
2457sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2458{
2459	struct sctp_tmit_chunk *chk;
2460	uint32_t tsize, pd_point;
2461	uint16_t nxt_todel;
2462
2463	if (asoc->fragmented_delivery_inprogress) {
2464		sctp_service_reassembly(stcb, asoc);
2465	}
2466	/* Can we proceed further, i.e. the PD-API is complete */
2467	if (asoc->fragmented_delivery_inprogress) {
2468		/* no */
2469		return;
2470	}
2471	/*
2472	 * Now is there some other chunk I can deliver from the reassembly
2473	 * queue.
2474	 */
2475doit_again:
2476	chk = TAILQ_FIRST(&asoc->reasmqueue);
2477	if (chk == NULL) {
2478		asoc->size_on_reasm_queue = 0;
2479		asoc->cnt_on_reasm_queue = 0;
2480		return;
2481	}
2482	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2483	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2484	    ((nxt_todel == chk->rec.data.stream_seq) ||
2485	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2486		/*
2487		 * Yep, the first one is here. We set up to start reception
2488		 * by backing down the TSN, just in case we can't deliver.
2489		 */
2490
2491		/*
2492		 * Before we start, though, either all of the message should
2493		 * be here, or its queued size should reach the partial
2494		 * delivery point, before anything is delivered.
2495		 */
2496		if (stcb->sctp_socket) {
2497			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2498			    stcb->sctp_ep->partial_delivery_point);
2499		} else {
2500			pd_point = stcb->sctp_ep->partial_delivery_point;
2501		}
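		/*
		 * Example (a sketch): with SCTP_SB_LIMIT_RCV() == 65536 and
		 * partial_delivery_point == 4096, pd_point == 4096, so a
		 * fragmented message enters partial delivery once 4 KB of
		 * it is queued, or immediately if the whole message is here.
		 */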
2502		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2503			asoc->fragmented_delivery_inprogress = 1;
2504			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2505			asoc->str_of_pdapi = chk->rec.data.stream_number;
2506			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2507			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2508			asoc->fragment_flags = chk->rec.data.rcv_flags;
2509			sctp_service_reassembly(stcb, asoc);
2510			if (asoc->fragmented_delivery_inprogress == 0) {
2511				goto doit_again;
2512			}
2513		}
2514	}
2515}
2516
2517int
2518sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2519    struct sctphdr *sh, struct sctp_inpcb *inp,
2520    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2521    uint8_t use_mflowid, uint32_t mflowid,
2522    uint32_t vrf_id, uint16_t port)
2523{
2524	struct sctp_data_chunk *ch, chunk_buf;
2525	struct sctp_association *asoc;
2526	int num_chunks = 0;	/* number of control chunks processed */
2527	int stop_proc = 0;
2528	int chk_length, break_flag, last_chunk;
2529	int abort_flag = 0, was_a_gap;
2530	struct mbuf *m;
2531	uint32_t highest_tsn;
2532
2533	/* set the rwnd */
2534	sctp_set_rwnd(stcb, &stcb->asoc);
2535
2536	m = *mm;
2537	SCTP_TCB_LOCK_ASSERT(stcb);
2538	asoc = &stcb->asoc;
2539	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2540		highest_tsn = asoc->highest_tsn_inside_nr_map;
2541	} else {
2542		highest_tsn = asoc->highest_tsn_inside_map;
2543	}
2544	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2545	/*
2546	 * set up where we got the last DATA packet from for any SACK that
2547	 * may need to go out. Don't bump the net. This is done ONLY when a
2548	 * chunk is assigned.
2549	 */
2550	asoc->last_data_chunk_from = net;
2551
2552	/*-
2553	 * Now before we proceed we must figure out if this is a wasted
2554	 * cluster... i.e. it is a small packet sent in and yet the driver
2555	 * underneath allocated a full cluster for it. If so we must copy it
2556	 * to a smaller mbuf and free up the cluster mbuf. This will help
2557	 * with cluster starvation. Note for __Panda__ we don't do this
2558	 * since it has clusters all the way down to 64 bytes.
2559	 */
2560	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2561		/* we only handle mbufs that are singletons.. not chains */
2562		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2563		if (m) {
2564			/* ok, let's see if we can copy the data up */
2565			caddr_t *from, *to;
2566
2567			/* get the pointers and copy */
2568			to = mtod(m, caddr_t *);
2569			from = mtod((*mm), caddr_t *);
2570			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2571			/* copy the length and free up the old */
2572			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2573			sctp_m_freem(*mm);
2574			/* success, hand back the copy */
2575			*mm = m;
2576		} else {
2577			/* We are in trouble in the mbuf world .. yikes */
2578			m = *mm;
2579		}
2580	}
2581	/* get pointer to the first chunk header */
2582	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2583	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2584	if (ch == NULL) {
2585		return (1);
2586	}
2587	/*
2588	 * process all DATA chunks...
2589	 */
2590	*high_tsn = asoc->cumulative_tsn;
2591	break_flag = 0;
2592	asoc->data_pkts_seen++;
2593	while (stop_proc == 0) {
2594		/* validate chunk length */
2595		chk_length = ntohs(ch->ch.chunk_length);
2596		if (length - *offset < chk_length) {
2597			/* all done, mutilated chunk */
2598			stop_proc = 1;
2599			continue;
2600		}
2601		if (ch->ch.chunk_type == SCTP_DATA) {
2602			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2603				/*
2604				 * Need to send an abort, since we had an
2605				 * invalid data chunk.
2606				 */
2607				struct mbuf *op_err;
2608
2609				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2610				    0, M_DONTWAIT, 1, MT_DATA);
2611
2612				if (op_err) {
2613					struct sctp_paramhdr *ph;
2614					uint32_t *ippp;
2615
2616					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2617					    (2 * sizeof(uint32_t));
2618					ph = mtod(op_err, struct sctp_paramhdr *);
2619					ph->param_type =
2620					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2621					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2622					ippp = (uint32_t *) (ph + 1);
2623					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2624					ippp++;
2625					*ippp = asoc->cumulative_tsn;
2626
2627				}
2628				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2629				sctp_abort_association(inp, stcb, m, iphlen, sh,
2630				    op_err,
2631				    use_mflowid, mflowid,
2632				    vrf_id, port);
2633				return (2);
2634			}
2635#ifdef SCTP_AUDITING_ENABLED
2636			sctp_audit_log(0xB1, 0);
2637#endif
2638			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2639				last_chunk = 1;
2640			} else {
2641				last_chunk = 0;
2642			}
2643			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2644			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2645			    last_chunk)) {
2646				num_chunks++;
2647			}
2648			if (abort_flag)
2649				return (2);
2650
2651			if (break_flag) {
2652				/*
2653				 * Set because we ran out of rwnd space and
2654				 * have no drop-report space left.
2655				 */
2656				stop_proc = 1;
2657				continue;
2658			}
2659		} else {
2660			/* not a data chunk in the data region */
2661			switch (ch->ch.chunk_type) {
2662			case SCTP_INITIATION:
2663			case SCTP_INITIATION_ACK:
2664			case SCTP_SELECTIVE_ACK:
2665			case SCTP_NR_SELECTIVE_ACK:
2666			case SCTP_HEARTBEAT_REQUEST:
2667			case SCTP_HEARTBEAT_ACK:
2668			case SCTP_ABORT_ASSOCIATION:
2669			case SCTP_SHUTDOWN:
2670			case SCTP_SHUTDOWN_ACK:
2671			case SCTP_OPERATION_ERROR:
2672			case SCTP_COOKIE_ECHO:
2673			case SCTP_COOKIE_ACK:
2674			case SCTP_ECN_ECHO:
2675			case SCTP_ECN_CWR:
2676			case SCTP_SHUTDOWN_COMPLETE:
2677			case SCTP_AUTHENTICATION:
2678			case SCTP_ASCONF_ACK:
2679			case SCTP_PACKET_DROPPED:
2680			case SCTP_STREAM_RESET:
2681			case SCTP_FORWARD_CUM_TSN:
2682			case SCTP_ASCONF:
2683				/*
2684				 * Now, what do we do with KNOWN chunks that
2685				 * are NOT in the right place?
2686				 *
2687				 * For now, I do nothing but ignore them. We
2688				 * may later want to add sysctl stuff to
2689				 * switch out and do either an ABORT() or
2690				 * possibly process them.
2691				 */
2692				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2693					struct mbuf *op_err;
2694
2695					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2696					sctp_abort_association(inp, stcb,
2697					    m, iphlen,
2698					    sh, op_err,
2699					    use_mflowid, mflowid,
2700					    vrf_id, port);
2701					return (2);
2702				}
2703				break;
2704			default:
2705				/* unknown chunk type, use bit rules */
2706				if (ch->ch.chunk_type & 0x40) {
2707					/* Add an error report to the queue */
2708					struct mbuf *merr;
2709					struct sctp_paramhdr *phd;
2710
2711					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2712					if (merr) {
2713						phd = mtod(merr, struct sctp_paramhdr *);
2714						/*
2715						 * We cheat and use param
2716						 * type since we did not
2717						 * bother to define an error
2718						 * cause struct. They are
2719						 * the same basic format
2720						 * with different names.
2721						 */
2722						phd->param_type =
2723						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2724						phd->param_length =
2725						    htons(chk_length + sizeof(*phd));
2726						SCTP_BUF_LEN(merr) = sizeof(*phd);
2727						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2728						if (SCTP_BUF_NEXT(merr)) {
2729							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2730								sctp_m_freem(merr);
2731							} else {
2732								sctp_queue_op_err(stcb, merr);
2733							}
2734						} else {
2735							sctp_m_freem(merr);
2736						}
2737					}
2738				}
2739				if ((ch->ch.chunk_type & 0x80) == 0) {
2740					/* discard the rest of this packet */
2741					stop_proc = 1;
2742				}	/* else skip this bad chunk and
2743					 * continue... */
2744				break;
2745			}	/* switch of chunk type */
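			/*
			 * The "bit rules" above follow the chunk-type
			 * encoding of RFC 4960, Section 3.2: the two
			 * high-order bits of an unrecognized chunk type
			 * select the action,
			 *
			 *	00 - stop processing, discard the packet
			 *	01 - stop, discard, and report in an ERROR chunk
			 *	10 - skip this chunk and continue processing
			 *	11 - skip, continue, and report
			 *
			 * hence the 0x40 (report) and 0x80 (continue) tests.
			 */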
2746		}
2747		*offset += SCTP_SIZE32(chk_length);
2748		if ((*offset >= length) || stop_proc) {
2749			/* no more data left in the mbuf chain */
2750			stop_proc = 1;
2751			continue;
2752		}
2753		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2754		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2755		if (ch == NULL) {
2756			*offset = length;
2757			stop_proc = 1;
2758			continue;
2759		}
2760	}
2761	if (break_flag) {
2762		/*
2763		 * we need to report rwnd overrun drops.
2764		 */
2765		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2766	}
2767	if (num_chunks) {
2768		/*
2769		 * Did we get data? If so, update the time for auto-close and
2770		 * give the peer credit for being alive.
2771		 */
2772		SCTP_STAT_INCR(sctps_recvpktwithdata);
2773		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2774			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2775			    stcb->asoc.overall_error_count,
2776			    0,
2777			    SCTP_FROM_SCTP_INDATA,
2778			    __LINE__);
2779		}
2780		stcb->asoc.overall_error_count = 0;
2781		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2782	}
2783	/* now service all of the reassm queue if needed */
2784	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2785		sctp_service_queues(stcb, asoc);
2786
2787	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2788		/* Assure that we ack right away */
2789		stcb->asoc.send_sack = 1;
2790	}
2791	/* Start a sack timer or QUEUE a SACK for sending */
2792	sctp_sack_check(stcb, was_a_gap);
2793	return (0);
2794}
2795
2796static int
2797sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2798    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2799    int *num_frs,
2800    uint32_t * biggest_newly_acked_tsn,
2801    uint32_t * this_sack_lowest_newack,
2802    int *rto_ok)
2803{
2804	struct sctp_tmit_chunk *tp1;
2805	unsigned int theTSN;
2806	int j, wake_him = 0, circled = 0;
2807
2808	/* Recover the tp1 we last saw */
2809	tp1 = *p_tp1;
2810	if (tp1 == NULL) {
2811		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2812	}
2813	for (j = frag_strt; j <= frag_end; j++) {
2814		theTSN = j + last_tsn;
2815		while (tp1) {
2816			if (tp1->rec.data.doing_fast_retransmit)
2817				(*num_frs) += 1;
2818
2819			/*-
2820			 * CMT: CUCv2 algorithm. For each TSN being
2821			 * processed from the sent queue, track the
2822			 * next expected pseudo-cumack, or
2823			 * rtx_pseudo_cumack, if required. Separate
2824			 * cumack trackers for first transmissions,
2825			 * and retransmissions.
2826			 */
2827			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2828			    (tp1->snd_count == 1)) {
2829				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2830				tp1->whoTo->find_pseudo_cumack = 0;
2831			}
2832			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833			    (tp1->snd_count > 1)) {
2834				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2835				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2836			}
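			/*
			 * Sketch (illustrative): with TSNs 10, 11, 12
			 * outstanding to a destination, pseudo_cumack starts
			 * at 10. When a SACK newly acks 10, the CUCv2 code
			 * below sets new_pseudo_cumack (allowing a cwnd
			 * update) and find_pseudo_cumack restarts the search
			 * at the next unacked first transmission, here 11.
			 */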
2837			if (tp1->rec.data.TSN_seq == theTSN) {
2838				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2839					/*-
2840					 * must be held until
2841					 * cum-ack passes
2842					 */
2843					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2844						/*-
2845						 * If it is less than RESEND, it is
2846						 * now no longer in flight.
2847						 * Higher values may already be set
2848						 * via previous Gap Ack Blocks...
2849						 * i.e. ACKED or RESEND.
2850						 */
2851						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2852						    *biggest_newly_acked_tsn)) {
2853							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2854						}
2855						/*-
2856						 * CMT: SFR algo (and HTNA) - set
2857						 * saw_newack to 1 for dest being
2858						 * newly acked. update
2859						 * this_sack_highest_newack if
2860						 * appropriate.
2861						 */
2862						if (tp1->rec.data.chunk_was_revoked == 0)
2863							tp1->whoTo->saw_newack = 1;
2864
2865						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2866						    tp1->whoTo->this_sack_highest_newack)) {
2867							tp1->whoTo->this_sack_highest_newack =
2868							    tp1->rec.data.TSN_seq;
2869						}
2870						/*-
2871						 * CMT DAC algo: also update
2872						 * this_sack_lowest_newack
2873						 */
2874						if (*this_sack_lowest_newack == 0) {
2875							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2876								sctp_log_sack(*this_sack_lowest_newack,
2877								    last_tsn,
2878								    tp1->rec.data.TSN_seq,
2879								    0,
2880								    0,
2881								    SCTP_LOG_TSN_ACKED);
2882							}
2883							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2884						}
2885						/*-
2886						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2887						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2888						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2889						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2890						 * Separate pseudo_cumack trackers for first transmissions and
2891						 * retransmissions.
2892						 */
2893						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2894							if (tp1->rec.data.chunk_was_revoked == 0) {
2895								tp1->whoTo->new_pseudo_cumack = 1;
2896							}
2897							tp1->whoTo->find_pseudo_cumack = 1;
2898						}
2899						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2900							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2901						}
2902						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2903							if (tp1->rec.data.chunk_was_revoked == 0) {
2904								tp1->whoTo->new_pseudo_cumack = 1;
2905							}
2906							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2907						}
2908						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2909							sctp_log_sack(*biggest_newly_acked_tsn,
2910							    last_tsn,
2911							    tp1->rec.data.TSN_seq,
2912							    frag_strt,
2913							    frag_end,
2914							    SCTP_LOG_TSN_ACKED);
2915						}
2916						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2917							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2918							    tp1->whoTo->flight_size,
2919							    tp1->book_size,
2920							    (uintptr_t) tp1->whoTo,
2921							    tp1->rec.data.TSN_seq);
2922						}
2923						sctp_flight_size_decrease(tp1);
2924						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2925							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2926							    tp1);
2927						}
2928						sctp_total_flight_decrease(stcb, tp1);
2929
2930						tp1->whoTo->net_ack += tp1->send_size;
2931						if (tp1->snd_count < 2) {
2932							/*-
2933							 * True non-retransmitted chunk
2934							 */
2935							tp1->whoTo->net_ack2 += tp1->send_size;
2936
2937							/*-
2938							 * update RTO too ?
2939							 */
2940							if (tp1->do_rtt) {
2941								if (*rto_ok) {
2942									tp1->whoTo->RTO =
2943									    sctp_calculate_rto(stcb,
2944									    &stcb->asoc,
2945									    tp1->whoTo,
2946									    &tp1->sent_rcv_time,
2947									    sctp_align_safe_nocopy,
2948									    SCTP_RTT_FROM_DATA);
2949									*rto_ok = 0;
2950								}
2951								if (tp1->whoTo->rto_needed == 0) {
2952									tp1->whoTo->rto_needed = 1;
2953								}
2954								tp1->do_rtt = 0;
2955							}
2956						}
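						/*
						 * Note (illustrative): the
						 * *rto_ok latch means at
						 * most one RTT sample is
						 * taken per SACK, and only
						 * from a chunk sent exactly
						 * once, in the spirit of
						 * Karn's algorithm.
						 */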
2957					}
2958					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2959						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2960						    stcb->asoc.this_sack_highest_gap)) {
2961							stcb->asoc.this_sack_highest_gap =
2962							    tp1->rec.data.TSN_seq;
2963						}
2964						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2965							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2966#ifdef SCTP_AUDITING_ENABLED
2967							sctp_audit_log(0xB2,
2968							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2969#endif
2970						}
2971					}
2972					/*-
2973					 * All chunks NOT UNSENT fall through here and are marked
2974					 * (leave PR-SCTP ones that are to skip alone though)
2975					 */
2976					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2977						tp1->sent = SCTP_DATAGRAM_MARKED;
2978
2979					if (tp1->rec.data.chunk_was_revoked) {
2980						/* deflate the cwnd */
2981						tp1->whoTo->cwnd -= tp1->book_size;
2982						tp1->rec.data.chunk_was_revoked = 0;
2983					}
2984					/* NR Sack code here */
2985					if (nr_sacking) {
2986						if (tp1->data) {
2987							/*
2988							 * sa_ignore
2989							 * NO_NULL_CHK
2990							 */
2991							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2992							sctp_m_freem(tp1->data);
2993							tp1->data = NULL;
2994						}
2995						wake_him++;
2996					}
2997				}
2998				break;
2999			}	/* if (tp1->TSN_seq == theTSN) */
3000			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3001				break;
3002			}
3003			tp1 = TAILQ_NEXT(tp1, sctp_next);
3004			if ((tp1 == NULL) && (circled == 0)) {
3005				circled++;
3006				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3007			}
3008		}		/* end while (tp1) */
3009		if (tp1 == NULL) {
3010			circled = 0;
3011			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3012		}
3013		/* In case the fragments were not in order we must reset */
3014	}			/* end for (j = fragStart */
3015	*p_tp1 = tp1;
3016	return (wake_him);	/* Return value only used for nr-sack */
3017}
3018
3019
3020static int
3021sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3022    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3023    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3024    int num_seg, int num_nr_seg, int *rto_ok)
3025{
3026	struct sctp_gap_ack_block *frag, block;
3027	struct sctp_tmit_chunk *tp1;
3028	int i;
3029	int num_frs = 0;
3030	int chunk_freed;
3031	int non_revocable;
3032	uint16_t frag_strt, frag_end, prev_frag_end;
3033
3034	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3035	prev_frag_end = 0;
3036	chunk_freed = 0;
3037
3038	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3039		if (i == num_seg) {
3040			prev_frag_end = 0;
3041			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3042		}
3043		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3044		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3045		*offset += sizeof(block);
3046		if (frag == NULL) {
3047			return (chunk_freed);
3048		}
3049		frag_strt = ntohs(frag->start);
3050		frag_end = ntohs(frag->end);
3051
3052		if (frag_strt > frag_end) {
3053			/* This gap report is malformed, skip it. */
3054			continue;
3055		}
3056		if (frag_strt <= prev_frag_end) {
3057			/* This gap report is not in order, so restart. */
3058			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3059		}
3060		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3061			*biggest_tsn_acked = last_tsn + frag_end;
3062		}
3063		if (i < num_seg) {
3064			non_revocable = 0;
3065		} else {
3066			non_revocable = 1;
3067		}
3068		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3069		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3070		    this_sack_lowest_newack, rto_ok)) {
3071			chunk_freed = 1;
3072		}
3073		prev_frag_end = frag_end;
3074	}
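	/*
	 * Interpretation sketch: gap ack block offsets are relative to the
	 * SACK's cumulative TSN ack. With last_tsn == 1000 and a block of
	 * start == 2, end == 4, the peer is acking TSNs 1002 through 1004,
	 * leaving TSN 1001 as a hole the sender may have to retransmit.
	 */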
3075	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3076		if (num_frs)
3077			sctp_log_fr(*biggest_tsn_acked,
3078			    *biggest_newly_acked_tsn,
3079			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3080	}
3081	return (chunk_freed);
3082}
3083
3084static void
3085sctp_check_for_revoked(struct sctp_tcb *stcb,
3086    struct sctp_association *asoc, uint32_t cumack,
3087    uint32_t biggest_tsn_acked)
3088{
3089	struct sctp_tmit_chunk *tp1;
3090	int tot_revoked = 0;
3091
3092	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3093		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3094			/*
3095			 * ok, this guy is either ACKED or MARKED. If it is
3096			 * ACKED it has been previously acked, but not this
3097			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3098			 * again.
3099			 */
3100			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3101				break;
3102			}
3103			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3104				/* it has been revoked */
3105				tp1->sent = SCTP_DATAGRAM_SENT;
3106				tp1->rec.data.chunk_was_revoked = 1;
3107				/*
3108				 * We must add this stuff back in to assure
3109				 * timers and such get started.
3110				 */
3111				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3112					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3113					    tp1->whoTo->flight_size,
3114					    tp1->book_size,
3115					    (uintptr_t) tp1->whoTo,
3116					    tp1->rec.data.TSN_seq);
3117				}
3118				sctp_flight_size_increase(tp1);
3119				sctp_total_flight_increase(stcb, tp1);
3120				/*
3121				 * We inflate the cwnd to compensate for our
3122				 * artificial inflation of the flight_size.
3123				 */
3124				tp1->whoTo->cwnd += tp1->book_size;
3125				tot_revoked++;
3126				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3127					sctp_log_sack(asoc->last_acked_seq,
3128					    cumack,
3129					    tp1->rec.data.TSN_seq,
3130					    0,
3131					    0,
3132					    SCTP_LOG_TSN_REVOKED);
3133				}
3134			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3135				/* it has been re-acked in this SACK */
3136				tp1->sent = SCTP_DATAGRAM_ACKED;
3137			}
3138		}
3139		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3140			break;
3141	}
3142}
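/*
 * Revocation example (a sketch): TSN 5 was gap-acked by an earlier SACK
 * and sits at SCTP_DATAGRAM_ACKED. A new SACK arrives whose gap blocks
 * reach TSN 8 but no longer cover 5; the loop above flips 5 back to
 * SCTP_DATAGRAM_SENT, re-inflates flight_size and the cwnd, and sets
 * chunk_was_revoked so a later ack can deflate the borrowed cwnd again.
 */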
3143
3144
3145static void
3146sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3147    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3148{
3149	struct sctp_tmit_chunk *tp1;
3150	int strike_flag = 0;
3151	struct timeval now;
3152	int tot_retrans = 0;
3153	uint32_t sending_seq;
3154	struct sctp_nets *net;
3155	int num_dests_sacked = 0;
3156
3157	/*
3158	 * select the sending_seq, this is either the next thing ready to be
3159	 * sent but not transmitted, OR, the next seq we assign.
3160	 */
3161	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3162	if (tp1 == NULL) {
3163		sending_seq = asoc->sending_seq;
3164	} else {
3165		sending_seq = tp1->rec.data.TSN_seq;
3166	}
3167
3168	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3169	if ((asoc->sctp_cmt_on_off > 0) &&
3170	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3171		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3172			if (net->saw_newack)
3173				num_dests_sacked++;
3174		}
3175	}
3176	if (stcb->asoc.peer_supports_prsctp) {
3177		(void)SCTP_GETTIME_TIMEVAL(&now);
3178	}
3179	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3180		strike_flag = 0;
3181		if (tp1->no_fr_allowed) {
3182			/* this one had a timeout or something */
3183			continue;
3184		}
3185		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3186			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3187				sctp_log_fr(biggest_tsn_newly_acked,
3188				    tp1->rec.data.TSN_seq,
3189				    tp1->sent,
3190				    SCTP_FR_LOG_CHECK_STRIKE);
3191		}
3192		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3193		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3194			/* done */
3195			break;
3196		}
3197		if (stcb->asoc.peer_supports_prsctp) {
3198			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3199				/* Is it expired? */
3200				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3201					/* Yes so drop it */
3202					if (tp1->data != NULL) {
3203						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3204						    SCTP_SO_NOT_LOCKED);
3205					}
3206					continue;
3207				}
3208			}
3209		}
3210		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3211			/* we are beyond the tsn in the sack  */
3212			break;
3213		}
3214		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3215			/* either a RESEND, ACKED, or MARKED */
3216			/* skip */
3217			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3218				/* Continue striking FWD-TSN chunks */
3219				tp1->rec.data.fwd_tsn_cnt++;
3220			}
3221			continue;
3222		}
3223		/*
3224		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3225		 */
3226		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3227			/*
3228			 * No new acks were received for data sent to this
3229			 * dest. Therefore, according to the SFR algo for
3230			 * CMT, no data sent to this dest can be marked for
3231			 * FR using this SACK.
3232			 */
3233			continue;
3234		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3235		    tp1->whoTo->this_sack_highest_newack)) {
3236			/*
3237			 * CMT: New acks were received for data sent to
3238			 * this dest. But no new acks were seen for data
3239			 * sent after tp1. Therefore, according to the SFR
3240			 * algo for CMT, tp1 cannot be marked for FR using
3241			 * this SACK. This step covers part of the DAC algo
3242			 * and the HTNA algo as well.
3243			 */
3244			continue;
3245		}
3246		/*
3247		 * Here we check to see if we have already done a FR and if
3248		 * so we see if the biggest TSN we saw in the sack is
3249		 * smaller than the recovery point. If so we don't strike
3250		 * the tsn... otherwise we CAN strike the TSN.
3251		 */
3252		/*
3253		 * @@@ JRI: Check for CMT if (accum_moved &&
3254		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3255		 * 0)) {
3256		 */
3257		if (accum_moved && asoc->fast_retran_loss_recovery) {
3258			/*
3259			 * Strike the TSN if in fast-recovery and cum-ack
3260			 * moved.
3261			 */
3262			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3263				sctp_log_fr(biggest_tsn_newly_acked,
3264				    tp1->rec.data.TSN_seq,
3265				    tp1->sent,
3266				    SCTP_FR_LOG_STRIKE_CHUNK);
3267			}
3268			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3269				tp1->sent++;
3270			}
3271			if ((asoc->sctp_cmt_on_off > 0) &&
3272			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3273				/*
3274				 * CMT DAC algorithm: If the SACK's DAC flag
3275				 * is 0, the lowest_newack test will not pass
3276				 * because it would have been set to the
3277				 * cumack earlier. If the chunk is not yet
3278				 * marked for rtx, the SACK is not mixed, and
3279				 * tp1 is not between two sacked TSNs, then
3280				 * mark it one more time. NOTE that we mark
3281				 * one additional time since the SACK DAC
3282				 * flag indicates that two packets have been
3283				 * received after this missing TSN.
3284				 */
3285				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3286				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3287					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3288						sctp_log_fr(16 + num_dests_sacked,
3289						    tp1->rec.data.TSN_seq,
3290						    tp1->sent,
3291						    SCTP_FR_LOG_STRIKE_CHUNK);
3292					}
3293					tp1->sent++;
3294				}
3295			}
3296		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3297		    (asoc->sctp_cmt_on_off == 0)) {
3298			/*
3299			 * For those that have done a FR we must take
3300			 * special consideration if we strike. I.e the
3301			 * biggest_newly_acked must be higher than the
3302			 * sending_seq at the time we did the FR.
3303			 */
3304			if (
3305#ifdef SCTP_FR_TO_ALTERNATE
3306			/*
3307			 * If FR's go to new networks, then we must only do
3308			 * this for singly homed asoc's. However if the FR's
3309			 * go to the same network (Armando's work) then it's
3310			 * ok to FR multiple times.
3311			 */
3312			    (asoc->numnets < 2)
3313#else
3314			    (1)
3315#endif
3316			    ) {
3317
3318				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3319				    tp1->rec.data.fast_retran_tsn)) {
3320					/*
3321					 * Strike the TSN, since this ack is
3322					 * beyond where things were when we
3323					 * did a FR.
3324					 */
3325					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3326						sctp_log_fr(biggest_tsn_newly_acked,
3327						    tp1->rec.data.TSN_seq,
3328						    tp1->sent,
3329						    SCTP_FR_LOG_STRIKE_CHUNK);
3330					}
3331					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3332						tp1->sent++;
3333					}
3334					strike_flag = 1;
3335					if ((asoc->sctp_cmt_on_off > 0) &&
3336					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3337						/*
3338						 * CMT DAC algorithm: If
3339						 * the SACK's DAC flag is
3340						 * 0, the lowest_newack
3341						 * test will not pass, as
3342						 * it would have been set
3343						 * to the cumack earlier.
3344						 * If the chunk is not
3345						 * yet marked for rtx, the
3346						 * SACK is not mixed, and
3347						 * tp1 is not between two
3348						 * sacked TSNs, then mark
3349						 * it one more time. NOTE
3350						 * that we mark one extra
3351						 * time since the SACK DAC
3352						 * flag indicates that two
3353						 * packets were received
3354						 * after this missing TSN.
3355						 */
3356						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3357						    (num_dests_sacked == 1) &&
3358						    SCTP_TSN_GT(this_sack_lowest_newack,
3359						    tp1->rec.data.TSN_seq)) {
3360							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3361								sctp_log_fr(32 + num_dests_sacked,
3362								    tp1->rec.data.TSN_seq,
3363								    tp1->sent,
3364								    SCTP_FR_LOG_STRIKE_CHUNK);
3365							}
3366							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3367								tp1->sent++;
3368							}
3369						}
3370					}
3371				}
3372			}
3373			/*
3374			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3375			 * algo covers HTNA.
3376			 */
3377		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3378		    biggest_tsn_newly_acked)) {
3379			/*
3380			 * We don't strike these: this is the HTNA
3381			 * algorithm, i.e. we don't strike if our TSN is
3382			 * larger than the Highest TSN Newly Acked.
3383			 */
3384			;
3385		} else {
3386			/* Strike the TSN */
3387			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3388				sctp_log_fr(biggest_tsn_newly_acked,
3389				    tp1->rec.data.TSN_seq,
3390				    tp1->sent,
3391				    SCTP_FR_LOG_STRIKE_CHUNK);
3392			}
3393			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3394				tp1->sent++;
3395			}
3396			if ((asoc->sctp_cmt_on_off > 0) &&
3397			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3398				/*
3399				 * CMT DAC algorithm: If the SACK's DAC flag
3400				 * is 0, the lowest_newack test will not pass
3401				 * because it would have been set to the
3402				 * cumack earlier. If the chunk is not yet
3403				 * marked for rtx, the SACK is not mixed, and
3404				 * tp1 is not between two sacked TSNs, then
3405				 * mark it one more time. NOTE that we mark
3406				 * one additional time since the SACK DAC
3407				 * flag indicates that two packets have been
3408				 * received after this missing TSN.
3409				 */
3410				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3411				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3412					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3413						sctp_log_fr(48 + num_dests_sacked,
3414						    tp1->rec.data.TSN_seq,
3415						    tp1->sent,
3416						    SCTP_FR_LOG_STRIKE_CHUNK);
3417					}
3418					tp1->sent++;
3419				}
3420			}
3421		}
3422		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3423			struct sctp_nets *alt;
3424
3425			/* fix counts and things */
3426			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3427				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3428				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3429				    tp1->book_size,
3430				    (uintptr_t) tp1->whoTo,
3431				    tp1->rec.data.TSN_seq);
3432			}
3433			if (tp1->whoTo) {
3434				tp1->whoTo->net_ack++;
3435				sctp_flight_size_decrease(tp1);
3436				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3437					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3438					    tp1);
3439				}
3440			}
3441			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3442				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3443				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3444			}
3445			/* add back to the rwnd */
3446			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3447
3448			/* remove from the total flight */
3449			sctp_total_flight_decrease(stcb, tp1);
3450
3451			if ((stcb->asoc.peer_supports_prsctp) &&
3452			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3453				/*
3454				 * Has it been retransmitted tv_sec times? -
3455				 * we store the retran count there.
3456				 */
3457				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3458					/* Yes, so drop it */
3459					if (tp1->data != NULL) {
3460						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3461						    SCTP_SO_NOT_LOCKED);
3462					}
3463					/* Make sure to flag we had a FR */
3464					tp1->whoTo->net_ack++;
3465					continue;
3466				}
3467			}
3468			/*
3469			 * SCTP_PRINTF("OK, we are now ready to FR this
3470			 * guy\n");
3471			 */
3472			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3473				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3474				    0, SCTP_FR_MARKED);
3475			}
3476			if (strike_flag) {
3477				/* This is a subsequent FR */
3478				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3479			}
3480			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3481			if (asoc->sctp_cmt_on_off > 0) {
3482				/*
3483				 * CMT: Using RTX_SSTHRESH policy for CMT.
3484				 * If CMT is being used, then pick dest with
3485				 * largest ssthresh for any retransmission.
3486				 */
3487				tp1->no_fr_allowed = 1;
3488				alt = tp1->whoTo;
3489				/* sa_ignore NO_NULL_CHK */
3490				if (asoc->sctp_cmt_pf > 0) {
3491					/*
3492					 * JRS 5/18/07 - If CMT PF is on,
3493					 * use the PF version of
3494					 * find_alt_net()
3495					 */
3496					alt = sctp_find_alternate_net(stcb, alt, 2);
3497				} else {
3498					/*
3499					 * JRS 5/18/07 - If only CMT is on,
3500					 * use the CMT version of
3501					 * find_alt_net()
3502					 */
3503					/* sa_ignore NO_NULL_CHK */
3504					alt = sctp_find_alternate_net(stcb, alt, 1);
3505				}
3506				if (alt == NULL) {
3507					alt = tp1->whoTo;
3508				}
3509				/*
3510				 * CUCv2: If a different dest is picked for
3511				 * the retransmission, then new
3512				 * (rtx-)pseudo_cumack needs to be tracked
3513				 * for orig dest. Let CUCv2 track new (rtx-)
3514				 * pseudo-cumack always.
3515				 */
3516				if (tp1->whoTo) {
3517					tp1->whoTo->find_pseudo_cumack = 1;
3518					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3519				}
3520			} else {/* CMT is OFF */
3521
3522#ifdef SCTP_FR_TO_ALTERNATE
3523				/* Can we find an alternate? */
3524				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3525#else
3526				/*
3527				 * default behavior is to NOT retransmit
3528				 * FR's to an alternate. Armando Caro's
3529				 * paper details why.
3530				 */
3531				alt = tp1->whoTo;
3532#endif
3533			}
3534
3535			tp1->rec.data.doing_fast_retransmit = 1;
3536			tot_retrans++;
3537			/* mark the sending seq for possible subsequent FR's */
3538			/*
3539			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3540			 * (uint32_t)tpi->rec.data.TSN_seq);
3541			 */
3542			if (TAILQ_EMPTY(&asoc->send_queue)) {
3543				/*
3544				 * If the send queue is empty then it's the
3545				 * next sequence number that will be
3546				 * assigned, so we subtract one from this to
3547				 * get the one we last sent.
3548				 */
3549				tp1->rec.data.fast_retran_tsn = sending_seq;
3550			} else {
3551				/*
3552				 * If there are chunks on the send queue
3553				 * (unsent data that has made it from the
3554				 * stream queues but not out the door), we
3555				 * take the first one (which will have the
3556				 * lowest TSN) and subtract one to get the
3557				 * one we last sent.
3558				 */
3559				struct sctp_tmit_chunk *ttt;
3560
3561				ttt = TAILQ_FIRST(&asoc->send_queue);
3562				tp1->rec.data.fast_retran_tsn =
3563				    ttt->rec.data.TSN_seq;
3564			}
3565
3566			if (tp1->do_rtt) {
3567				/*
3568				 * this chunk had an RTO calculation pending on
3569				 * it, cancel it
3570				 */
3571				if ((tp1->whoTo != NULL) &&
3572				    (tp1->whoTo->rto_needed == 0)) {
3573					tp1->whoTo->rto_needed = 1;
3574				}
3575				tp1->do_rtt = 0;
3576			}
3577			if (alt != tp1->whoTo) {
3578				/* yes, there is an alternate. */
3579				sctp_free_remote_addr(tp1->whoTo);
3580				/* sa_ignore FREED_MEMORY */
3581				tp1->whoTo = alt;
3582				atomic_add_int(&alt->ref_count, 1);
3583			}
3584		}
3585	}
3586}
3587
3588struct sctp_tmit_chunk *
3589sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3590    struct sctp_association *asoc)
3591{
3592	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3593	struct timeval now;
3594	int now_filled = 0;
3595
3596	if (asoc->peer_supports_prsctp == 0) {
3597		return (NULL);
3598	}
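	/*
	 * Walk the sent queue in TSN order. The advanced peer ack point
	 * can only move across chunks marked FORWARD_TSN_SKIP (or RESEND
	 * chunks whose PR-SCTP lifetime has expired); the walk stops at
	 * the first reliable or still-pending chunk.
	 */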
3599	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3600		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3601		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3602			/* no chance to advance, out of here */
3603			break;
3604		}
3605		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3606			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3607				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3608				    asoc->advanced_peer_ack_point,
3609				    tp1->rec.data.TSN_seq, 0, 0);
3610			}
3611		}
3612		if (!PR_SCTP_ENABLED(tp1->flags)) {
3613			/*
3614			 * We can't fwd-tsn past any that are reliable, i.e.
3615			 * retransmitted until the asoc fails.
3616			 */
3617			break;
3618		}
3619		if (!now_filled) {
3620			(void)SCTP_GETTIME_TIMEVAL(&now);
3621			now_filled = 1;
3622		}
3623		/*
3624		 * Now we have a chunk which is marked for another
3625		 * retransmission to a PR-stream but may have run out of its
3626		 * chances already, OR has been marked to be skipped now. Can
3627		 * we skip it if it's a resend?
3628		 */
3629		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3630		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3631			/*
3632			 * Now is this one marked for resend and its time is
3633			 * now up?
3634			 */
3635			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3636				/* Yes so drop it */
3637				if (tp1->data) {
3638					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3639					    1, SCTP_SO_NOT_LOCKED);
3640				}
3641			} else {
3642				/*
3643				 * No, we are done when we hit one marked
3644				 * for resend whose time has not expired.
3645				 */
3646				break;
3647			}
3648		}
3649		/*
3650		 * Ok, now if this chunk is marked to be dropped we can clean
3651		 * up the chunk, advance our peer ack point and check the
3652		 * next chunk.
3653		 */
3654		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3655			/* the advancedPeerAckPoint goes forward */
3656			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3657				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3658				a_adv = tp1;
3659			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3660				/* No update but we do save the chk */
3661				a_adv = tp1;
3662			}
3663		} else {
3664			/*
3665			 * If it is still in RESEND we can advance no
3666			 * further
3667			 */
3668			break;
3669		}
3670	}
3671	return (a_adv);
3672}
3673
3674static int
3675sctp_fs_audit(struct sctp_association *asoc)
3676{
3677	struct sctp_tmit_chunk *chk;
3678	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3679	int entry_flight, entry_cnt, ret;
3680
3681	entry_flight = asoc->total_flight;
3682	entry_cnt = asoc->total_flight_count;
3683	ret = 0;
3684
3685	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3686		return (0);
3687
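	/*
	 * Classify every chunk on the sent queue by its send state and
	 * compare against the aggregate flight counters. The caller only
	 * invokes this audit when it believes nothing should be counted
	 * in flight, so finding in-flight (or in-between) chunks here
	 * means the bookkeeping drifted and the caller must rebuild it.
	 */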
3688	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3689		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3690			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3691			    chk->rec.data.TSN_seq,
3692			    chk->send_size,
3693			    chk->snd_count);
3694			inflight++;
3695		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3696			resend++;
3697		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3698			inbetween++;
3699		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3700			above++;
3701		} else {
3702			acked++;
3703		}
3704	}
3705
3706	if ((inflight > 0) || (inbetween > 0)) {
3707#ifdef INVARIANTS
3708		panic("Flight size-express incorrect? \n");
3709#else
3710		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3711		    entry_flight, entry_cnt);
3712
3713		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3714		    inflight, inbetween, resend, above, acked);
3715		ret = 1;
3716#endif
3717	}
3718	return (ret);
3719}
3720
3721
3722static void
3723sctp_window_probe_recovery(struct sctp_tcb *stcb,
3724    struct sctp_association *asoc,
3725    struct sctp_tmit_chunk *tp1)
3726{
3727	tp1->window_probe = 0;
3728	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3729		/* TSNs were skipped, we do NOT move back. */
3730		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3731		    tp1->whoTo->flight_size,
3732		    tp1->book_size,
3733		    (uintptr_t) tp1->whoTo,
3734		    tp1->rec.data.TSN_seq);
3735		return;
3736	}
3737	/* First setup this by shrinking flight */
3738	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3739		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3740		    tp1);
3741	}
3742	sctp_flight_size_decrease(tp1);
3743	sctp_total_flight_decrease(stcb, tp1);
3744	/* Now mark for resend */
3745	tp1->sent = SCTP_DATAGRAM_RESEND;
3746	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3747
3748	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3749		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3750		    tp1->whoTo->flight_size,
3751		    tp1->book_size,
3752		    (uintptr_t) tp1->whoTo,
3753		    tp1->rec.data.TSN_seq);
3754	}
3755}
3756
3757void
3758sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3759    uint32_t rwnd, int *abort_now, int ecne_seen)
3760{
3761	struct sctp_nets *net;
3762	struct sctp_association *asoc;
3763	struct sctp_tmit_chunk *tp1, *tp2;
3764	uint32_t old_rwnd;
3765	int win_probe_recovery = 0;
3766	int win_probe_recovered = 0;
3767	int j, done_once = 0;
3768	int rto_ok = 1;
3769
3770	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3771		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3772		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3773	}
3774	SCTP_TCB_LOCK_ASSERT(stcb);
3775#ifdef SCTP_ASOCLOG_OF_TSNS
3776	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3777	stcb->asoc.cumack_log_at++;
3778	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3779		stcb->asoc.cumack_log_at = 0;
3780	}
3781#endif
3782	asoc = &stcb->asoc;
3783	old_rwnd = asoc->peers_rwnd;
3784	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3785		/* old ack */
3786		return;
3787	} else if (asoc->last_acked_seq == cumack) {
3788		/* Window update sack */
3789		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3790		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3791		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3792			/* SWS sender side engages */
3793			asoc->peers_rwnd = 0;
3794		}
3795		if (asoc->peers_rwnd > old_rwnd) {
3796			goto again;
3797		}
3798		return;
3799	}
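	/*
	 * From here on the cumack has advanced. This express path is the
	 * fast case for a SACK that moved only the cumulative ack;
	 * gap-ack and duplicate-TSN processing is handled by
	 * sctp_handle_sack() instead.
	 */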
3800	/* First setup for CC stuff */
3801	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3802		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3803			/* Drag along the window_tsn for cwr's */
3804			net->cwr_window_tsn = cumack;
3805		}
3806		net->prev_cwnd = net->cwnd;
3807		net->net_ack = 0;
3808		net->net_ack2 = 0;
3809
3810		/*
3811		 * CMT: Reset CUC and Fast recovery algo variables before
3812		 * SACK processing
3813		 */
3814		net->new_pseudo_cumack = 0;
3815		net->will_exit_fast_recovery = 0;
3816		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3817			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3818		}
3819	}
3820	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3821		uint32_t send_s;
3822
3823		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3824			tp1 = TAILQ_LAST(&asoc->sent_queue,
3825			    sctpchunk_listhead);
3826			send_s = tp1->rec.data.TSN_seq + 1;
3827		} else {
3828			send_s = asoc->sending_seq;
3829		}
3830		if (SCTP_TSN_GE(cumack, send_s)) {
3831#ifndef INVARIANTS
3832			struct mbuf *oper;
3833
3834#endif
3835#ifdef INVARIANTS
3836			panic("Impossible sack 1");
3837#else
3838
3839			*abort_now = 1;
3840			/* XXX */
3841			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3842			    0, M_DONTWAIT, 1, MT_DATA);
3843			if (oper) {
3844				struct sctp_paramhdr *ph;
3845				uint32_t *ippp;
3846
3847				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3848				    sizeof(uint32_t);
3849				ph = mtod(oper, struct sctp_paramhdr *);
3850				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3851				ph->param_length = htons(SCTP_BUF_LEN(oper));
3852				ippp = (uint32_t *) (ph + 1);
3853				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3854			}
3855			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3856			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3857			return;
3858#endif
3859		}
3860	}
3861	asoc->this_sack_highest_gap = cumack;
3862	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3863		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3864		    stcb->asoc.overall_error_count,
3865		    0,
3866		    SCTP_FROM_SCTP_INDATA,
3867		    __LINE__);
3868	}
3869	stcb->asoc.overall_error_count = 0;
3870	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3871		/* process the new consecutive TSN first */
3872		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3873			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3874				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3875					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3876				}
3877				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3878					/*
3879					 * If it is less than ACKED, it is
3880					 * now no-longer in flight. Higher
3881					 * now no longer in flight. Higher
3882					 */
3883					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3884						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3885							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3886							    tp1->whoTo->flight_size,
3887							    tp1->book_size,
3888							    (uintptr_t) tp1->whoTo,
3889							    tp1->rec.data.TSN_seq);
3890						}
3891						sctp_flight_size_decrease(tp1);
3892						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3893							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3894							    tp1);
3895						}
3896						/* sa_ignore NO_NULL_CHK */
3897						sctp_total_flight_decrease(stcb, tp1);
3898					}
3899					tp1->whoTo->net_ack += tp1->send_size;
3900					if (tp1->snd_count < 2) {
3901						/*
3902						 * True non-retransmitted
3903						 * chunk
3904						 */
3905						tp1->whoTo->net_ack2 +=
3906						    tp1->send_size;
3907
3908						/* update RTO too? */
3909						if (tp1->do_rtt) {
3910							if (rto_ok) {
3911								tp1->whoTo->RTO =
3912								/*
3913								/* sa_ignore NO_NULL_CHK */
3918								    asoc, tp1->whoTo,
3919								    &tp1->sent_rcv_time,
3920								    sctp_align_safe_nocopy,
3921								    SCTP_RTT_FROM_DATA);
3922								rto_ok = 0;
3923							}
3924							if (tp1->whoTo->rto_needed == 0) {
3925								tp1->whoTo->rto_needed = 1;
3926							}
3927							tp1->do_rtt = 0;
3928						}
3929					}
3930					/*
3931					 * CMT: CUCv2 algorithm. From the
3932					 * cumack'd TSNs, for each TSN being
3933					 * acked for the first time, set the
3934					 * following variables for the
3935					 * corresp destination.
3936					 * new_pseudo_cumack will trigger a
3937					 * cwnd update.
3938					 * find_(rtx_)pseudo_cumack will
3939					 * trigger search for the next
3940					 * expected (rtx-)pseudo-cumack.
3941					 */
3942					tp1->whoTo->new_pseudo_cumack = 1;
3943					tp1->whoTo->find_pseudo_cumack = 1;
3944					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3945
3946					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3947						/* sa_ignore NO_NULL_CHK */
3948						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3949					}
3950				}
3951				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3952					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3953				}
3954				if (tp1->rec.data.chunk_was_revoked) {
3955					/* deflate the cwnd */
3956					tp1->whoTo->cwnd -= tp1->book_size;
3957					tp1->rec.data.chunk_was_revoked = 0;
3958				}
3959				tp1->sent = SCTP_DATAGRAM_ACKED;
3960				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3961				if (tp1->data) {
3962					/* sa_ignore NO_NULL_CHK */
3963					sctp_free_bufspace(stcb, asoc, tp1, 1);
3964					sctp_m_freem(tp1->data);
3965					tp1->data = NULL;
3966				}
3967				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3968					sctp_log_sack(asoc->last_acked_seq,
3969					    cumack,
3970					    tp1->rec.data.TSN_seq,
3971					    0,
3972					    0,
3973					    SCTP_LOG_FREE_SENT);
3974				}
3975				asoc->sent_queue_cnt--;
3976				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3977			} else {
3978				break;
3979			}
3980		}
3981
3982	}
3983	/* sa_ignore NO_NULL_CHK */
3984	if (stcb->sctp_socket) {
3985#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3986		struct socket *so;
3987
3988#endif
3989		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3990		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3991			/* sa_ignore NO_NULL_CHK */
3992			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3993		}
3994#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3995		so = SCTP_INP_SO(stcb->sctp_ep);
3996		atomic_add_int(&stcb->asoc.refcnt, 1);
3997		SCTP_TCB_UNLOCK(stcb);
3998		SCTP_SOCKET_LOCK(so, 1);
3999		SCTP_TCB_LOCK(stcb);
4000		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4001		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4002			/* assoc was freed while we were unlocked */
4003			SCTP_SOCKET_UNLOCK(so, 1);
4004			return;
4005		}
4006#endif
4007		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4008#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4009		SCTP_SOCKET_UNLOCK(so, 1);
4010#endif
4011	} else {
4012		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4013			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4014		}
4015	}
4016
4017	/* JRS - Use the congestion control given in the CC module */
4018	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4019		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4020			if (net->net_ack2 > 0) {
4021				/*
4022				 * Karn's rule applies to clearing error
4023				 * count; this is optional.
4024				 */
4025				net->error_count = 0;
4026				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4027					/* addr came good */
4028					net->dest_state |= SCTP_ADDR_REACHABLE;
4029					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4030					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4031				}
4032				if (net == stcb->asoc.primary_destination) {
4033					if (stcb->asoc.alternate) {
4034						/*
4035						 * release the alternate,
4036						 * primary is good
4037						 */
4038						sctp_free_remote_addr(stcb->asoc.alternate);
4039						stcb->asoc.alternate = NULL;
4040					}
4041				}
4042				if (net->dest_state & SCTP_ADDR_PF) {
4043					net->dest_state &= ~SCTP_ADDR_PF;
4044					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4045					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4046					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4047					/* Done with this net */
4048					net->net_ack = 0;
4049				}
4050				/* restore any doubled timers */
4051				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4052				if (net->RTO < stcb->asoc.minrto) {
4053					net->RTO = stcb->asoc.minrto;
4054				}
4055				if (net->RTO > stcb->asoc.maxrto) {
4056					net->RTO = stcb->asoc.maxrto;
4057				}
4058			}
4059		}
4060		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4061	}
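	/*
	 * The RTO restore above recomputes the timeout from the smoothed
	 * estimates (lastsa holds the scaled SRTT, lastsv the variance),
	 * undoing any exponential backoff applied during loss, clamped
	 * to [minrto, maxrto].
	 */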
4062	asoc->last_acked_seq = cumack;
4063
4064	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4065		/* nothing left in-flight */
4066		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4067			net->flight_size = 0;
4068			net->partial_bytes_acked = 0;
4069		}
4070		asoc->total_flight = 0;
4071		asoc->total_flight_count = 0;
4072	}
4073	/* RWND update */
4074	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4075	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4076	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4077		/* SWS sender side engages */
4078		asoc->peers_rwnd = 0;
4079	}
4080	if (asoc->peers_rwnd > old_rwnd) {
4081		win_probe_recovery = 1;
4082	}
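	/*
	 * In effect, peers_rwnd = rwnd - (total_flight +
	 * total_flight_count * sctp_peer_chunk_oh), floored at zero and
	 * zeroed entirely while below the SWS threshold. A window that
	 * grew past its old value lets us recover any window probes.
	 */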
4083	/* Now assure a timer where data is queued at */
4084again:
4085	j = 0;
4086	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4087		int to_ticks;
4088
4089		if (win_probe_recovery && (net->window_probe)) {
4090			win_probe_recovered = 1;
4091			/*
4092			 * Find the first chunk that was used with a window
4093			 * probe and clear its window_probe state.
4094			 */
4095			/* sa_ignore FREED_MEMORY */
4096			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4097				if (tp1->window_probe) {
4098					/* move back to data send queue */
4099					sctp_window_probe_recovery(stcb, asoc, tp1);
4100					break;
4101				}
4102			}
4103		}
4104		if (net->RTO == 0) {
4105			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4106		} else {
4107			to_ticks = MSEC_TO_TICKS(net->RTO);
4108		}
4109		if (net->flight_size) {
4110			j++;
4111			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4112			    sctp_timeout_handler, &net->rxt_timer);
4113			if (net->window_probe) {
4114				net->window_probe = 0;
4115			}
4116		} else {
4117			if (net->window_probe) {
4118				/*
4119				 * In window probes we must assure a timer
4120				 * is still running there
4121				 */
4122				net->window_probe = 0;
4123				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4124					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4125					    sctp_timeout_handler, &net->rxt_timer);
4126				}
4127			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4128				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4129				    stcb, net,
4130				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4131			}
4132		}
4133	}
4134	if ((j == 0) &&
4135	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4136	    (asoc->sent_queue_retran_cnt == 0) &&
4137	    (win_probe_recovered == 0) &&
4138	    (done_once == 0)) {
4139		/*
4140		 * This should not happen unless all packets are
4141		 * PR-SCTP and marked to be skipped.
4142		 */
4143		if (sctp_fs_audit(asoc)) {
4144			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4145				net->flight_size = 0;
4146			}
4147			asoc->total_flight = 0;
4148			asoc->total_flight_count = 0;
4149			asoc->sent_queue_retran_cnt = 0;
4150			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4151				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4152					sctp_flight_size_increase(tp1);
4153					sctp_total_flight_increase(stcb, tp1);
4154				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4155					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4156				}
4157			}
4158		}
4159		done_once = 1;
4160		goto again;
4161	}
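	/*
	 * If the audit tripped, the flight counters were rebuilt from
	 * scratch: every chunk still below RESEND is put back in flight
	 * and the retransmit count is recomputed. The done_once flag
	 * limits this to a single retry so we cannot loop forever on a
	 * persistent mismatch.
	 */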
4162	/**********************************/
4163	/* Now what about shutdown issues */
4164	/**********************************/
4165	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4166		/* nothing left on sendqueue.. consider done */
4167		/* clean up */
4168		if ((asoc->stream_queue_cnt == 1) &&
4169		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4170		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4171		    (asoc->locked_on_sending)
4172		    ) {
4173			struct sctp_stream_queue_pending *sp;
4174
4175			/*
4176			 * We may be in a state where we got everything across
4177			 * but cannot write more due to a shutdown. We abort
4178			 * since the user did not indicate EOR in this case.
4179			 * The sp will be cleaned during free of the asoc.
4180			 */
4181			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4182			    sctp_streamhead);
4183			if ((sp) && (sp->length == 0)) {
4184				/* Let cleanup code purge it */
4185				if (sp->msg_is_complete) {
4186					asoc->stream_queue_cnt--;
4187				} else {
4188					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4189					asoc->locked_on_sending = NULL;
4190					asoc->stream_queue_cnt--;
4191				}
4192			}
4193		}
4194		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4195		    (asoc->stream_queue_cnt == 0)) {
4196			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4197				/* Need to abort here */
4198				struct mbuf *oper;
4199
4200		abort_out_now:
4201				*abort_now = 1;
4202				/* XXX */
4203				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4204				    0, M_DONTWAIT, 1, MT_DATA);
4205				if (oper) {
4206					struct sctp_paramhdr *ph;
4207					uint32_t *ippp;
4208
4209					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4210					    sizeof(uint32_t);
4211					ph = mtod(oper, struct sctp_paramhdr *);
4212					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4213					ph->param_length = htons(SCTP_BUF_LEN(oper));
4214					ippp = (uint32_t *) (ph + 1);
4215					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4216				}
4217				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4218				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4219			} else {
4220				struct sctp_nets *netp;
4221
4222				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4223				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4224					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4225				}
4226				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4227				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4228				sctp_stop_timers_for_shutdown(stcb);
4229				if (asoc->alternate) {
4230					netp = asoc->alternate;
4231				} else {
4232					netp = asoc->primary_destination;
4233				}
4234				sctp_send_shutdown(stcb, netp);
4235				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4236				    stcb->sctp_ep, stcb, netp);
4237				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4238				    stcb->sctp_ep, stcb, netp);
4239			}
4240		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4241		    (asoc->stream_queue_cnt == 0)) {
4242			struct sctp_nets *netp;
4243
4244			if (asoc->alternate) {
4245				netp = asoc->alternate;
4246			} else {
4247				netp = asoc->primary_destination;
4248			}
4249			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4250				goto abort_out_now;
4251			}
4252			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4253			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4254			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4255			sctp_send_shutdown_ack(stcb, netp);
4256			sctp_stop_timers_for_shutdown(stcb);
4257			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4258			    stcb->sctp_ep, stcb, netp);
4259		}
4260	}
4261	/*********************************************/
4262	/* Here we perform PR-SCTP procedures        */
4263	/* (section 4.2)                             */
4264	/*********************************************/
4265	/* C1. update advancedPeerAckPoint */
4266	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4267		asoc->advanced_peer_ack_point = cumack;
4268	}
4269	/* PR-Sctp issues need to be addressed too */
4270	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4271		struct sctp_tmit_chunk *lchk;
4272		uint32_t old_adv_peer_ack_point;
4273
4274		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4275		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4276		/* C3. See if we need to send a Fwd-TSN */
4277		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4278			/*
4279			 * ISSUE with ECN, see FWD-TSN processing.
4280			 */
4281			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4282				send_forward_tsn(stcb, asoc);
4283			} else if (lchk) {
4284				/* try to FR fwd-tsn's that get lost too */
4285				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4286					send_forward_tsn(stcb, asoc);
4287				}
4288			}
4289		}
4290		if (lchk) {
4291			/* Assure a timer is up */
4292			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4293			    stcb->sctp_ep, stcb, lchk->whoTo);
4294		}
4295	}
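	/*
	 * A FORWARD-TSN is sent when the advanced peer ack point moved
	 * past the cumack, and re-sent (fwd_tsn_cnt >= 3) when earlier
	 * FORWARD-TSNs appear to have been lost; the T3 timer is kept
	 * running as long as a skipped chunk is still outstanding.
	 */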
4296	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4297		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4298		    rwnd,
4299		    stcb->asoc.peers_rwnd,
4300		    stcb->asoc.total_flight,
4301		    stcb->asoc.total_output_queue_size);
4302	}
4303}
4304
4305void
4306sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4307    struct sctp_tcb *stcb,
4308    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4309    int *abort_now, uint8_t flags,
4310    uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4311{
4312	struct sctp_association *asoc;
4313	struct sctp_tmit_chunk *tp1, *tp2;
4314	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4315	uint16_t wake_him = 0;
4316	uint32_t send_s = 0;
4317	long j;
4318	int accum_moved = 0;
4319	int will_exit_fast_recovery = 0;
4320	uint32_t a_rwnd, old_rwnd;
4321	int win_probe_recovery = 0;
4322	int win_probe_recovered = 0;
4323	struct sctp_nets *net = NULL;
4324	int done_once;
4325	int rto_ok = 1;
4326	uint8_t reneged_all = 0;
4327	uint8_t cmt_dac_flag;
4328
4329	/*
4330	 * We take any chance we can to service our queues since we
4331	 * cannot get awoken when the socket is read from :<
4332	 *
4333	 * Now perform the actual SACK handling:
4334	 * 1) Verify that it is not an old sack; if so, discard.
4335	 * 2) If there is nothing left in the send queue (cum-ack equals
4336	 *    last acked) then you have a duplicate too; update any rwnd
4337	 *    change, verify no timers are running, then return.
4338	 * 3) Process any new consecutive data (cum-ack moved) first and
4339	 *    note that it moved. 4) Process any sack blocks. 5) Drop any
4340	 *    acked chunks from the queue. 6) Check for any revoked blocks
4341	 *    and mark them. 7) Update the cwnd. 8) If nothing is left,
4342	 *    sync up flight sizes, stop all timers, and check for
4343	 *    shutdown_pending state; if so, send off the shutdown. If in
4344	 *    shutdown recv, send the shutdown-ack, start that timer, and
4345	 *    return. 9) Strike any non-acked chunks and do the FR
4346	 *    procedure if needed, being sure to set the FR flag. 10) Do
4347	 *    PR-SCTP procedures. 11) Apply any FR penalties. 12) Assure
4348	 *    we will SACK if in shutdown_recv state.
4349	 */
4350	SCTP_TCB_LOCK_ASSERT(stcb);
4351	/* CMT DAC algo */
4352	this_sack_lowest_newack = 0;
4353	SCTP_STAT_INCR(sctps_slowpath_sack);
4354	last_tsn = cum_ack;
4355	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4356#ifdef SCTP_ASOCLOG_OF_TSNS
4357	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4358	stcb->asoc.cumack_log_at++;
4359	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4360		stcb->asoc.cumack_log_at = 0;
4361	}
4362#endif
4363	a_rwnd = rwnd;
4364
4365	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4366		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4367		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4368	}
4369	old_rwnd = stcb->asoc.peers_rwnd;
4370	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4371		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4372		    stcb->asoc.overall_error_count,
4373		    0,
4374		    SCTP_FROM_SCTP_INDATA,
4375		    __LINE__);
4376	}
4377	stcb->asoc.overall_error_count = 0;
4378	asoc = &stcb->asoc;
4379	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4380		sctp_log_sack(asoc->last_acked_seq,
4381		    cum_ack,
4382		    0,
4383		    num_seg,
4384		    num_dup,
4385		    SCTP_LOG_NEW_SACK);
4386	}
4387	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4388		uint16_t i;
4389		uint32_t *dupdata, dblock;
4390
4391		for (i = 0; i < num_dup; i++) {
4392			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4393			    sizeof(uint32_t), (uint8_t *) & dblock);
4394			if (dupdata == NULL) {
4395				break;
4396			}
4397			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4398		}
4399	}
4400	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4401		/* reality check */
4402		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4403			tp1 = TAILQ_LAST(&asoc->sent_queue,
4404			    sctpchunk_listhead);
4405			send_s = tp1->rec.data.TSN_seq + 1;
4406		} else {
4407			tp1 = NULL;
4408			send_s = asoc->sending_seq;
4409		}
4410		if (SCTP_TSN_GE(cum_ack, send_s)) {
4411			struct mbuf *oper;
4412
4413			/*
4414			 * no way, we have not even sent this TSN out yet.
4415			 * Peer is hopelessly messed up with us.
4416			 */
4417			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4418			    cum_ack, send_s);
4419			if (tp1) {
4420				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4421				    tp1->rec.data.TSN_seq, tp1);
4422			}
4423	hopeless_peer:
4424			*abort_now = 1;
4425			/* XXX */
4426			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4427			    0, M_DONTWAIT, 1, MT_DATA);
4428			if (oper) {
4429				struct sctp_paramhdr *ph;
4430				uint32_t *ippp;
4431
4432				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4433				    sizeof(uint32_t);
4434				ph = mtod(oper, struct sctp_paramhdr *);
4435				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4436				ph->param_length = htons(SCTP_BUF_LEN(oper));
4437				ippp = (uint32_t *) (ph + 1);
4438				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4439			}
4440			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4441			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4442			return;
4443		}
4444	}
4445	/**********************/
4446	/* 1) check the range */
4447	/**********************/
4448	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4449		/* acking something behind */
4450		return;
4451	}
4452	/* update the Rwnd of the peer */
4453	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4454	    TAILQ_EMPTY(&asoc->send_queue) &&
4455	    (asoc->stream_queue_cnt == 0)) {
4456		/* nothing left on send/sent and strmq */
4457		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4458			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4459			    asoc->peers_rwnd, 0, 0, a_rwnd);
4460		}
4461		asoc->peers_rwnd = a_rwnd;
4462		if (asoc->sent_queue_retran_cnt) {
4463			asoc->sent_queue_retran_cnt = 0;
4464		}
4465		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4466			/* SWS sender side engages */
4467			asoc->peers_rwnd = 0;
4468		}
4469		/* stop any timers */
4470		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4471			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4472			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4473			net->partial_bytes_acked = 0;
4474			net->flight_size = 0;
4475		}
4476		asoc->total_flight = 0;
4477		asoc->total_flight_count = 0;
4478		return;
4479	}
4480	/*
4481	 * We init net_ack and net_ack2 to 0. These are used to track 2
4482	 * things. The total byte count acked is tracked in net_ack AND
4483	 * net_ack2 is used to track the total bytes acked that are
4484	 * unambiguous and were never retransmitted. We track these on a
4485	 * per destination address basis.
4486	 */
4487	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4488		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4489			/* Drag along the window_tsn for cwr's */
4490			net->cwr_window_tsn = cum_ack;
4491		}
4492		net->prev_cwnd = net->cwnd;
4493		net->net_ack = 0;
4494		net->net_ack2 = 0;
4495
4496		/*
4497		 * CMT: Reset CUC and Fast recovery algo variables before
4498		 * SACK processing
4499		 */
4500		net->new_pseudo_cumack = 0;
4501		net->will_exit_fast_recovery = 0;
4502		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4503			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4504		}
4505	}
4506	/* process the new consecutive TSN first */
4507	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4508		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4509			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4510				accum_moved = 1;
4511				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4512					/*
4513					 * If it is less than ACKED, it is
4514					 * now no-longer in flight. Higher
4515					 * now no longer in flight. Higher
4516					 */
4517					if ((tp1->whoTo->dest_state &
4518					    SCTP_ADDR_UNCONFIRMED) &&
4519					    (tp1->snd_count < 2)) {
4520						/*
4521						 * If there was no
4522						 * retransmission and the
4523						 * address is unconfirmed
4524						 * and we sent there and
4525						 * are now sacked, then it
4526						 * is confirmed; mark it so.
4527						 */
4528						tp1->whoTo->dest_state &=
4529						    ~SCTP_ADDR_UNCONFIRMED;
4530					}
4531					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4532						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4533							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4534							    tp1->whoTo->flight_size,
4535							    tp1->book_size,
4536							    (uintptr_t) tp1->whoTo,
4537							    tp1->rec.data.TSN_seq);
4538						}
4539						sctp_flight_size_decrease(tp1);
4540						sctp_total_flight_decrease(stcb, tp1);
4541						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4542							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4543							    tp1);
4544						}
4545					}
4546					tp1->whoTo->net_ack += tp1->send_size;
4547
4548					/* CMT SFR and DAC algos */
4549					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4550					tp1->whoTo->saw_newack = 1;
4551
4552					if (tp1->snd_count < 2) {
4553						/*
4554						 * True non-retransmitted
4555						 * chunk
4556						 */
4557						tp1->whoTo->net_ack2 +=
4558						    tp1->send_size;
4559
4560						/* update RTO too? */
4561						if (tp1->do_rtt) {
4562							if (rto_ok) {
4563								tp1->whoTo->RTO =
4564								    sctp_calculate_rto(stcb,
4565								    asoc, tp1->whoTo,
4566								    &tp1->sent_rcv_time,
4567								    sctp_align_safe_nocopy,
4568								    SCTP_RTT_FROM_DATA);
4569								rto_ok = 0;
4570							}
4571							if (tp1->whoTo->rto_needed == 0) {
4572								tp1->whoTo->rto_needed = 1;
4573							}
4574							tp1->do_rtt = 0;
4575						}
4576					}
4577					/*
4578					 * CMT: CUCv2 algorithm. From the
4579					 * cumack'd TSNs, for each TSN being
4580					 * acked for the first time, set the
4581					 * following variables for the
4582					 * corresp destination.
4583					 * new_pseudo_cumack will trigger a
4584					 * cwnd update.
4585					 * find_(rtx_)pseudo_cumack will
4586					 * trigger search for the next
4587					 * expected (rtx-)pseudo-cumack.
4588					 */
4589					tp1->whoTo->new_pseudo_cumack = 1;
4590					tp1->whoTo->find_pseudo_cumack = 1;
4591					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4592
4593
4594					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4595						sctp_log_sack(asoc->last_acked_seq,
4596						    cum_ack,
4597						    tp1->rec.data.TSN_seq,
4598						    0,
4599						    0,
4600						    SCTP_LOG_TSN_ACKED);
4601					}
4602					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4603						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4604					}
4605				}
4606				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4607					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4608#ifdef SCTP_AUDITING_ENABLED
4609					sctp_audit_log(0xB3,
4610					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4611#endif
4612				}
4613				if (tp1->rec.data.chunk_was_revoked) {
4614					/* deflate the cwnd */
4615					tp1->whoTo->cwnd -= tp1->book_size;
4616					tp1->rec.data.chunk_was_revoked = 0;
4617				}
4618				tp1->sent = SCTP_DATAGRAM_ACKED;
4619			}
4620		} else {
4621			break;
4622		}
4623	}
4624	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4625	/* always set this up to cum-ack */
4626	asoc->this_sack_highest_gap = last_tsn;
4627
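	/*
	 * biggest_tsn_acked tracks the highest TSN covered by any gap-ack
	 * block in this SACK, while biggest_tsn_newly_acked tracks the
	 * highest TSN acked here for the first time; both start at the
	 * cumack and are raised by sctp_handle_segments() below.
	 */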
4628	if ((num_seg > 0) || (num_nr_seg > 0)) {
4629
4630		/*
4631		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4632		 * to be greater than the cumack. Also reset saw_newack to 0
4633		 * for all dests.
4634		 */
4635		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4636			net->saw_newack = 0;
4637			net->this_sack_highest_newack = last_tsn;
4638		}
4639
4640		/*
4641		 * this_sack_highest_gap will increase while handling NEW
4642		 * segments; this_sack_highest_newack will increase while
4643		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4644		 * used for the CMT DAC algo. saw_newack will also change.
4645		 */
4646		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4647		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4648		    num_seg, num_nr_seg, &rto_ok)) {
4649			wake_him++;
4650		}
4651		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4652			/*
4653			 * validate the biggest_tsn_acked in the gap acks if
4654			 * strict adherence is wanted.
4655			 */
4656			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4657				/*
4658				 * peer is either confused or we are under
4659				 * attack. We must abort.
4660				 */
4661				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4662				    biggest_tsn_acked, send_s);
4663				goto hopeless_peer;
4664			}
4665		}
4666	}
4667	/*******************************************/
4668	/* cancel ALL T3-send timer if accum moved */
4669	/*******************************************/
4670	if (asoc->sctp_cmt_on_off > 0) {
4671		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4672			if (net->new_pseudo_cumack)
4673				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4674				    stcb, net,
4675				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4676
4677		}
4678	} else {
4679		if (accum_moved) {
4680			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4681				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4682				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4683			}
4684		}
4685	}
4686	/********************************************/
4687	/* drop the acked chunks from the sentqueue */
4688	/********************************************/
4689	asoc->last_acked_seq = cum_ack;
4690
4691	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4692		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4693			break;
4694		}
4695		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4696			/* no more sent on list */
4697			SCTP_PRINTF("Warning, tp1->sent == %d and it's now acked?\n",
4698			    tp1->sent);
4699		}
4700		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4701		if (tp1->pr_sctp_on) {
4702			if (asoc->pr_sctp_cnt != 0)
4703				asoc->pr_sctp_cnt--;
4704		}
4705		asoc->sent_queue_cnt--;
4706		if (tp1->data) {
4707			/* sa_ignore NO_NULL_CHK */
4708			sctp_free_bufspace(stcb, asoc, tp1, 1);
4709			sctp_m_freem(tp1->data);
4710			tp1->data = NULL;
4711			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4712				asoc->sent_queue_cnt_removeable--;
4713			}
4714		}
4715		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4716			sctp_log_sack(asoc->last_acked_seq,
4717			    cum_ack,
4718			    tp1->rec.data.TSN_seq,
4719			    0,
4720			    0,
4721			    SCTP_LOG_FREE_SENT);
4722		}
4723		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4724		wake_him++;
4725	}
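	/*
	 * Sanity check: every chunk removed from the sent queue above
	 * already had its bytes removed from the flight, so an empty
	 * sent queue must leave total_flight at zero; if not, the
	 * accounting drifted and is forced back to zero below.
	 */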
4726	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4727#ifdef INVARIANTS
4728		panic("Warning flight size is positive and should be 0");
4729#else
4730		SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
4731		    asoc->total_flight);
4732#endif
4733		asoc->total_flight = 0;
4734	}
4735	/* sa_ignore NO_NULL_CHK */
4736	if ((wake_him) && (stcb->sctp_socket)) {
4737#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4738		struct socket *so;
4739
4740#endif
4741		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4742		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4743			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4744		}
4745#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4746		so = SCTP_INP_SO(stcb->sctp_ep);
4747		atomic_add_int(&stcb->asoc.refcnt, 1);
4748		SCTP_TCB_UNLOCK(stcb);
4749		SCTP_SOCKET_LOCK(so, 1);
4750		SCTP_TCB_LOCK(stcb);
4751		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4752		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4753			/* assoc was freed while we were unlocked */
4754			SCTP_SOCKET_UNLOCK(so, 1);
4755			return;
4756		}
4757#endif
4758		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4759#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4760		SCTP_SOCKET_UNLOCK(so, 1);
4761#endif
4762	} else {
4763		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4764			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4765		}
4766	}
4767
4768	if (asoc->fast_retran_loss_recovery && accum_moved) {
4769		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4770			/* Setup so we will exit RFC2582 fast recovery */
4771			will_exit_fast_recovery = 1;
4772		}
4773	}
4774	/*
4775	 * Check for revoked fragments:
4776	 *
4777	 * If the previous sack had no frags, then we can't have any
4778	 * revoked. If the previous sack had frags, then: if we now have
4779	 * frags (num_seg > 0), call sctp_check_for_revoked() to tell if
4780	 * the peer revoked some of them; else the peer revoked all ACKED
4781	 * fragments, since we had some before and now we have NONE.
4782	 */
4783
4784	if (num_seg) {
4785		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4786		asoc->saw_sack_with_frags = 1;
4787	} else if (asoc->saw_sack_with_frags) {
4788		int cnt_revoked = 0;
4789
4790		/* Peer revoked all dg's marked or acked */
4791		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4792			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4793				tp1->sent = SCTP_DATAGRAM_SENT;
4794				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4795					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4796					    tp1->whoTo->flight_size,
4797					    tp1->book_size,
4798					    (uintptr_t) tp1->whoTo,
4799					    tp1->rec.data.TSN_seq);
4800				}
4801				sctp_flight_size_increase(tp1);
4802				sctp_total_flight_increase(stcb, tp1);
4803				tp1->rec.data.chunk_was_revoked = 1;
4804				/*
4805				 * To ensure that this increase in
4806				 * flightsize, which is artificial, does not
4807				 * throttle the sender, we also increase the
4808				 * cwnd artificially.
4809				 */
4810				tp1->whoTo->cwnd += tp1->book_size;
4811				cnt_revoked++;
4812			}
4813		}
4814		if (cnt_revoked) {
4815			reneged_all = 1;
4816		}
4817		asoc->saw_sack_with_frags = 0;
4818	}
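	/*
	 * Chunks put back in flight here were revoked: their book_size
	 * was re-added to the cwnd above, and that inflation is paired
	 * with a matching deflation once the chunk is finally cum-acked
	 * (see the chunk_was_revoked handling when acked chunks are
	 * freed).
	 */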
4819	if (num_nr_seg > 0)
4820		asoc->saw_sack_with_nr_frags = 1;
4821	else
4822		asoc->saw_sack_with_nr_frags = 0;
4823
4824	/* JRS - Use the congestion control given in the CC module */
4825	if (ecne_seen == 0) {
4826		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4827			if (net->net_ack2 > 0) {
4828				/*
4829				 * Karn's rule applies to clearing error
4830				 * count; this is optional.
4831				 */
4832				net->error_count = 0;
4833				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4834					/* addr came good */
4835					net->dest_state |= SCTP_ADDR_REACHABLE;
4836					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4837					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4838				}
4839				if (net == stcb->asoc.primary_destination) {
4840					if (stcb->asoc.alternate) {
4841						/*
4842						 * release the alternate,
4843						 * primary is good
4844						 */
4845						sctp_free_remote_addr(stcb->asoc.alternate);
4846						stcb->asoc.alternate = NULL;
4847					}
4848				}
4849				if (net->dest_state & SCTP_ADDR_PF) {
4850					net->dest_state &= ~SCTP_ADDR_PF;
4851					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4852					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4853					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4854					/* Done with this net */
4855					net->net_ack = 0;
4856				}
4857				/* restore any doubled timers */
4858				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4859				if (net->RTO < stcb->asoc.minrto) {
4860					net->RTO = stcb->asoc.minrto;
4861				}
4862				if (net->RTO > stcb->asoc.maxrto) {
4863					net->RTO = stcb->asoc.maxrto;
4864				}
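				/*
				 * A sketch of the restore arithmetic above,
				 * with purely illustrative values and
				 * assuming SCTP_RTT_SHIFT is 3: lastsa = 800
				 * and lastsv = 40 give RTO = (800 >> 3) + 40
				 * = 140, which the two checks above then
				 * clamp into [minrto, maxrto].
				 */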
4865			}
4866		}
4867		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4868	}
4869	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4870		/* nothing left in-flight */
4871		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4872			/* stop all timers */
4873			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4874			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4875			net->flight_size = 0;
4876			net->partial_bytes_acked = 0;
4877		}
4878		asoc->total_flight = 0;
4879		asoc->total_flight_count = 0;
4880	}
4881	/**********************************/
4882	/* Now what about shutdown issues */
4883	/**********************************/
4884	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on the send queue... consider done */
4886		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4887			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4888			    asoc->peers_rwnd, 0, 0, a_rwnd);
4889		}
4890		asoc->peers_rwnd = a_rwnd;
4891		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4892			/* SWS sender side engages */
4893			asoc->peers_rwnd = 0;
4894		}
4895		/* clean up */
4896		if ((asoc->stream_queue_cnt == 1) &&
4897		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4898		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4899		    (asoc->locked_on_sending)
4900		    ) {
4901			struct sctp_stream_queue_pending *sp;
4902
			/*
			 * We may be in a state where everything queued has
			 * made it across, but we cannot write more due to a
			 * shutdown; we abort since the user did not indicate
			 * EOR in this case.
			 */
4908			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4909			    sctp_streamhead);
4910			if ((sp) && (sp->length == 0)) {
4911				asoc->locked_on_sending = NULL;
4912				if (sp->msg_is_complete) {
4913					asoc->stream_queue_cnt--;
4914				} else {
4915					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4916					asoc->stream_queue_cnt--;
4917				}
4918			}
4919		}
4920		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4921		    (asoc->stream_queue_cnt == 0)) {
4922			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4923				/* Need to abort here */
4924				struct mbuf *oper;
4925
4926		abort_out_now:
4927				*abort_now = 1;
4928				/* XXX */
4929				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4930				    0, M_DONTWAIT, 1, MT_DATA);
4931				if (oper) {
4932					struct sctp_paramhdr *ph;
4933					uint32_t *ippp;
4934
4935					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4936					    sizeof(uint32_t);
4937					ph = mtod(oper, struct sctp_paramhdr *);
4938					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4939					ph->param_length = htons(SCTP_BUF_LEN(oper));
4940					ippp = (uint32_t *) (ph + 1);
4941					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4942				}
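				/*
				 * Sketch of the resulting error-cause TLV (8
				 * bytes on the wire): a 4-byte sctp_paramhdr
				 * carrying type SCTP_CAUSE_USER_INITIATED_ABT
				 * and length 8, followed by one 32-bit
				 * diagnostic code recording where in this
				 * file the abort originated.
				 */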
4943				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4944				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4945				return;
4946			} else {
4947				struct sctp_nets *netp;
4948
4949				if (asoc->alternate) {
4950					netp = asoc->alternate;
4951				} else {
4952					netp = asoc->primary_destination;
4953				}
4954				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4955				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4956					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4957				}
4958				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4959				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4960				sctp_stop_timers_for_shutdown(stcb);
4961				sctp_send_shutdown(stcb, netp);
4962				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4963				    stcb->sctp_ep, stcb, netp);
4964				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4965				    stcb->sctp_ep, stcb, netp);
4966			}
4967			return;
4968		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4969		    (asoc->stream_queue_cnt == 0)) {
4970			struct sctp_nets *netp;
4971
4972			if (asoc->alternate) {
4973				netp = asoc->alternate;
4974			} else {
4975				netp = asoc->primary_destination;
4976			}
4977			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4978				goto abort_out_now;
4979			}
4980			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4981			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4982			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4983			sctp_send_shutdown_ack(stcb, netp);
4984			sctp_stop_timers_for_shutdown(stcb);
4985			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4986			    stcb->sctp_ep, stcb, netp);
4987			return;
4988		}
4989	}
4990	/*
4991	 * Now here we are going to recycle net_ack for a different use...
4992	 * HEADS UP.
4993	 */
4994	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4995		net->net_ack = 0;
4996	}
4997
4998	/*
4999	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5000	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5001	 * automatically ensure that.
5002	 */
5003	if ((asoc->sctp_cmt_on_off > 0) &&
5004	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5005	    (cmt_dac_flag == 0)) {
5006		this_sack_lowest_newack = cum_ack;
5007	}
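	/*
	 * this_sack_lowest_newack is handed to sctp_strike_gap_ack_chunks()
	 * below; pinning it to cum_ack when the DAC flag is 0 keeps the DAC
	 * rule from adding any extra strike marks, as the comment above
	 * notes.
	 */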
5008	if ((num_seg > 0) || (num_nr_seg > 0)) {
5009		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5010		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5011	}
5012	/* JRS - Use the congestion control given in the CC module */
5013	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5014
5015	/* Now are we exiting loss recovery ? */
5016	if (will_exit_fast_recovery) {
5017		/* Ok, we must exit fast recovery */
5018		asoc->fast_retran_loss_recovery = 0;
5019	}
5020	if ((asoc->sat_t3_loss_recovery) &&
5021	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5022		/* end satellite t3 loss recovery */
5023		asoc->sat_t3_loss_recovery = 0;
5024	}
5025	/*
5026	 * CMT Fast recovery
5027	 */
5028	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5029		if (net->will_exit_fast_recovery) {
5030			/* Ok, we must exit fast recovery */
5031			net->fast_retran_loss_recovery = 0;
5032		}
5033	}
5034
5035	/* Adjust and set the new rwnd value */
5036	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5037		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5038		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5039	}
5040	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5041	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5042	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5043		/* SWS sender side engages */
5044		asoc->peers_rwnd = 0;
5045	}
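	/*
	 * A worked example of the adjustment above, with purely
	 * illustrative numbers: a_rwnd = 65536, total_flight = 10000,
	 * total_flight_count = 10 and a per-chunk overhead of 256 give
	 * peers_rwnd = 65536 - (10000 + 10 * 256) = 52976. Had the result
	 * dropped below the sender-side SWS limit, it would be clamped to
	 * zero as shown.
	 */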
5046	if (asoc->peers_rwnd > old_rwnd) {
5047		win_probe_recovery = 1;
5048	}
	/*
	 * Now we must set up a timer for anyone with outstanding data.
	 */
5053	done_once = 0;
5054again:
5055	j = 0;
5056	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5057		if (win_probe_recovery && (net->window_probe)) {
5058			win_probe_recovered = 1;
			/*-
			 * Find the first chunk that was used with a
			 * window probe and clear the event. Put it
			 * back into the send queue as if it has not
			 * been sent.
			 */
5065			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5066				if (tp1->window_probe) {
5067					sctp_window_probe_recovery(stcb, asoc, tp1);
5068					break;
5069				}
5070			}
5071		}
5072		if (net->flight_size) {
5073			j++;
5074			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5075				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5076				    stcb->sctp_ep, stcb, net);
5077			}
5078			if (net->window_probe) {
5079				net->window_probe = 0;
5080			}
5081		} else {
5082			if (net->window_probe) {
				/*
				 * For window probes we must ensure that a
				 * timer is still running there.
				 */
5087				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5088					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5089					    stcb->sctp_ep, stcb, net);
5091				}
5092			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5093				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5094				    stcb, net,
5095				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5096			}
5097		}
5098	}
5099	if ((j == 0) &&
5100	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5101	    (asoc->sent_queue_retran_cnt == 0) &&
5102	    (win_probe_recovered == 0) &&
5103	    (done_once == 0)) {
		/*
		 * This should not happen unless all packets are PR-SCTP
		 * and marked to be skipped, of course.
		 */
5108		if (sctp_fs_audit(asoc)) {
5109			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5110				net->flight_size = 0;
5111			}
5112			asoc->total_flight = 0;
5113			asoc->total_flight_count = 0;
5114			asoc->sent_queue_retran_cnt = 0;
5115			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5116				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5117					sctp_flight_size_increase(tp1);
5118					sctp_total_flight_increase(stcb, tp1);
5119				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5120					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5121				}
5122			}
5123		}
5124		done_once = 1;
5125		goto again;
5126	}
5127	/*********************************************/
5128	/* Here we perform PR-SCTP procedures        */
5129	/* (section 4.2)                             */
5130	/*********************************************/
5131	/* C1. update advancedPeerAckPoint */
5132	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5133		asoc->advanced_peer_ack_point = cum_ack;
5134	}
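	/*
	 * Illustrative TSNs for the steps that follow: with cum_ack = 100
	 * and TSNs 101-103 abandoned under a PR-SCTP policy,
	 * sctp_try_advance_peer_ack_point() below can move
	 * advancedPeerAckPoint to 103; since 103 > 100, a FWD-TSN is sent
	 * telling the peer to skip ahead.
	 */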
5135	/* C2. try to further move advancedPeerAckPoint ahead */
5136	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5137		struct sctp_tmit_chunk *lchk;
5138		uint32_t old_adv_peer_ack_point;
5139
5140		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5141		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5142		/* C3. See if we need to send a Fwd-TSN */
5143		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5144			/*
5145			 * ISSUE with ECN, see FWD-TSN processing.
5146			 */
5147			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5148				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5149				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5150				    old_adv_peer_ack_point);
5151			}
5152			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5153				send_forward_tsn(stcb, asoc);
5154			} else if (lchk) {
				/* try to fast-retransmit FWD-TSNs that get lost too */
5156				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5157					send_forward_tsn(stcb, asoc);
5158				}
5159			}
5160		}
5161		if (lchk) {
			/* Ensure a timer is up */
5163			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5164			    stcb->sctp_ep, stcb, lchk->whoTo);
5165		}
5166	}
5167	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5168		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5169		    a_rwnd,
5170		    stcb->asoc.peers_rwnd,
5171		    stcb->asoc.total_flight,
5172		    stcb->asoc.total_output_queue_size);
5173	}
5174}
5175
5176void
5177sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5178{
5179	/* Copy cum-ack */
5180	uint32_t cum_ack, a_rwnd;
5181
5182	cum_ack = ntohl(cp->cumulative_tsn_ack);
5183	/* Arrange so a_rwnd does NOT change */
5184	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
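	/*
	 * E.g. if peers_rwnd is 40000 and total_flight is 5000, we pass
	 * a_rwnd = 45000; the idea is that once the handler accounts for
	 * the 5000 bytes in flight, the peer's window works out to 40000
	 * again, i.e. unchanged.
	 */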
5185
5186	/* Now call the express sack handling */
5187	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5188}
5189
5190static void
5191sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5192    struct sctp_stream_in *strmin)
5193{
5194	struct sctp_queued_to_read *ctl, *nctl;
5195	struct sctp_association *asoc;
5196	uint16_t tt;
5197
5198	asoc = &stcb->asoc;
5199	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream
	 * sequence number that came in.
	 */
5204	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5205		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5206			/* this is deliverable now */
5207			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5208			/* subtract pending on streams */
5209			asoc->size_on_all_streams -= ctl->length;
5210			sctp_ucount_decr(asoc->cnt_on_all_streams);
5211			/* deliver it to at least the delivery-q */
5212			if (stcb->sctp_socket) {
5213				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5214				sctp_add_to_readq(stcb->sctp_ep, stcb,
5215				    ctl,
5216				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5217			}
5218		} else {
5219			/* no more delivery now. */
5220			break;
5221		}
5222	}
	/*
	 * Now we must deliver things in queue the normal way, if any are
	 * now ready.
	 */
5227	tt = strmin->last_sequence_delivered + 1;
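	/*
	 * Illustrative SSNs: if the pass above advanced
	 * last_sequence_delivered to 7 and SSNs 8, 9 and 11 sit in the
	 * queue, this loop delivers 8 and 9 in order and then stops at the
	 * gap before 11.
	 */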
5228	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5229		if (tt == ctl->sinfo_ssn) {
5230			/* this is deliverable now */
5231			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5232			/* subtract pending on streams */
5233			asoc->size_on_all_streams -= ctl->length;
5234			sctp_ucount_decr(asoc->cnt_on_all_streams);
5235			/* deliver it to at least the delivery-q */
5236			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5237			if (stcb->sctp_socket) {
5238				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5239				sctp_add_to_readq(stcb->sctp_ep, stcb,
5240				    ctl,
5241				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5243			}
5244			tt = strmin->last_sequence_delivered + 1;
5245		} else {
5246			break;
5247		}
5248	}
5249}
5250
5251static void
5252sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5253    struct sctp_association *asoc,
5254    uint16_t stream, uint16_t seq)
5255{
5256	struct sctp_tmit_chunk *chk, *nchk;
5257
	/* For each one on here, see if we need to toss it */
	/*
	 * For now, large messages held on the reasm queue that are complete
	 * will be tossed too. We could in theory do more work: spin through
	 * and stop after dumping one message (i.e., on seeing the start of
	 * a new message at the head) and call the delivery function to see
	 * if it can be delivered. But for now we just dump everything on
	 * the queue.
	 */
5267	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		/*
		 * Do not toss it if it is on a different stream or marked
		 * for unordered delivery, in which case the stream sequence
		 * number has no meaning.
		 */
5273		if ((chk->rec.data.stream_number != stream) ||
5274		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5275			continue;
5276		}
5277		if (chk->rec.data.stream_seq == seq) {
5278			/* It needs to be tossed */
5279			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5280			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5281				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5282				asoc->str_of_pdapi = chk->rec.data.stream_number;
5283				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5284				asoc->fragment_flags = chk->rec.data.rcv_flags;
5285			}
5286			asoc->size_on_reasm_queue -= chk->send_size;
5287			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5288
5289			/* Clear up any stream problem */
5290			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5291			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must move this stream's sequence
				 * number forward if the chunk being
				 * skipped is not unordered. There is a
				 * chance that, if the peer does not
				 * include the last fragment in its
				 * FWD-TSN, we WILL have a problem here,
				 * since we would have a partial chunk in
				 * queue that may not be deliverable. Also,
				 * if a partial delivery API has started,
				 * the user may get a partial chunk, with
				 * the next read returning a new chunk.
				 * Really ugly, but I see no way around it!
				 * Maybe a notify??
				 */
5306				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5307			}
5308			if (chk->data) {
5309				sctp_m_freem(chk->data);
5310				chk->data = NULL;
5311			}
5312			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5313		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
			/*
			 * If the stream_seq is greater than the one being
			 * purged, we are done.
			 */
5318			break;
5319		}
5320	}
5321}
5322
5323
5324void
5325sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5326    struct sctp_forward_tsn_chunk *fwd,
5327    int *abort_flag, struct mbuf *m, int offset)
5328{
	/* The PR-SCTP FWD-TSN */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update the re-ordering queue on pr-in-streams
	 * 4) clean up the re-assembly queue
	 * 5) send a SACK to report where we are
	 */
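	/*
	 * A small worked example with illustrative TSNs: with local cumTSN
	 * at 100, FwdTSN(105) moves the cumTSN to 105; if 106 and 107 have
	 * already arrived, step 2 advances it further to 107; reassembly
	 * fragments at or below the new cumTSN are then tossed, the
	 * per-stream re-ordering queues are kicked, and a SACK reports the
	 * new state.
	 */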
5341	struct sctp_association *asoc;
5342	uint32_t new_cum_tsn, gap;
5343	unsigned int i, fwd_sz, m_size;
5344	uint32_t str_seq;
5345	struct sctp_stream_in *strm;
5346	struct sctp_tmit_chunk *chk, *nchk;
5347	struct sctp_queued_to_read *ctl, *sv;
5348
5349	asoc = &stcb->asoc;
5350	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5351		SCTPDBG(SCTP_DEBUG_INDATA1,
5352		    "Bad size too small/big fwd-tsn\n");
5353		return;
5354	}
5355	m_size = (stcb->asoc.mapping_array_size << 3);
5356	/*************************************************************/
5357	/* 1. Here we update local cumTSN and shift the bitmap array */
5358	/*************************************************************/
5359	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5360
5361	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5362		/* Already got there ... */
5363		return;
5364	}
	/*
	 * Now we know the new TSN is more advanced; let's find the actual
	 * gap.
	 */
5369	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
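	/*
	 * E.g. with mapping_array_base_tsn = 1000 and new_cum_tsn = 1010
	 * the gap is 10 (the macro uses modular 32-bit arithmetic, so TSN
	 * wrap is handled); gap >= m_size below means the new cum-ack lies
	 * beyond the entire mapping window.
	 */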
5370	asoc->cumulative_tsn = new_cum_tsn;
5371	if (gap >= m_size) {
5372		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5373			struct mbuf *oper;
5374
			/*
			 * Out of range (beyond what even single-byte
			 * chunks in the rwnd we give out could reach).
			 * This must be an attacker.
			 */
5379			*abort_flag = 1;
5380			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5381			    0, M_DONTWAIT, 1, MT_DATA);
5382			if (oper) {
5383				struct sctp_paramhdr *ph;
5384				uint32_t *ippp;
5385
5386				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5387				    (sizeof(uint32_t) * 3);
5388				ph = mtod(oper, struct sctp_paramhdr *);
5389				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5390				ph->param_length = htons(SCTP_BUF_LEN(oper));
5391				ippp = (uint32_t *) (ph + 1);
5392				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5393				ippp++;
5394				*ippp = asoc->highest_tsn_inside_map;
5395				ippp++;
5396				*ippp = new_cum_tsn;
5397			}
5398			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5399			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5400			return;
5401		}
5402		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5403
5404		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5405		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5406		asoc->highest_tsn_inside_map = new_cum_tsn;
5407
5408		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5409		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5410
5411		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5412			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5413		}
5414	} else {
5415		SCTP_TCB_LOCK_ASSERT(stcb);
5416		for (i = 0; i <= gap; i++) {
5417			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5418			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5419				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5420				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5421					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5422				}
5423			}
5424		}
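		/*
		 * After this loop every TSN from the mapping base up to the
		 * new cum-ack is marked present in one of the two arrays,
		 * so the skipped TSNs are treated as received and
		 * non-renegable from here on.
		 */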
5425	}
5426	/*************************************************************/
5427	/* 2. Clear up re-assembly queue                             */
5428	/*************************************************************/
	/*
	 * First service it if the PD-API is up, just in case we can
	 * progress it forward.
	 */
5433	if (asoc->fragmented_delivery_inprogress) {
5434		sctp_service_reassembly(stcb, asoc);
5435	}
	/* For each one on here, see if we need to toss it */
	/*
	 * For now, large messages held on the reasm queue that are complete
	 * will be tossed too. We could in theory do more work: spin through
	 * and stop after dumping one message (i.e., on seeing the start of
	 * a new message at the head) and call the delivery function to see
	 * if it can be delivered. But for now we just dump everything on
	 * the queue.
	 */
5445	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5446		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5447			/* It needs to be tossed */
5448			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5449			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5450				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5451				asoc->str_of_pdapi = chk->rec.data.stream_number;
5452				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5453				asoc->fragment_flags = chk->rec.data.rcv_flags;
5454			}
5455			asoc->size_on_reasm_queue -= chk->send_size;
5456			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5457
5458			/* Clear up any stream problem */
5459			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5460			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must move this stream's sequence
				 * number forward if the chunk being
				 * skipped is not unordered. There is a
				 * chance that, if the peer does not
				 * include the last fragment in its
				 * FWD-TSN, we WILL have a problem here,
				 * since we would have a partial chunk in
				 * queue that may not be deliverable. Also,
				 * if a partial delivery API has started,
				 * the user may get a partial chunk, with
				 * the next read returning a new chunk.
				 * Really ugly, but I see no way around it!
				 * Maybe a notify??
				 */
5475				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5476			}
5477			if (chk->data) {
5478				sctp_m_freem(chk->data);
5479				chk->data = NULL;
5480			}
5481			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5482		} else {
5483			/*
5484			 * Ok we have gone beyond the end of the fwd-tsn's
5485			 * mark.
5486			 */
5487			break;
5488		}
5489	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                        */
	/*******************************************************/
5494	fwd_sz -= sizeof(*fwd);
5495	if (m && fwd_sz) {
5496		/* New method. */
5497		unsigned int num_str;
5498		struct sctp_strseq *stseq, strseqbuf;
5499
5500		offset += sizeof(*fwd);
5501
5502		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5503		num_str = fwd_sz / sizeof(struct sctp_strseq);
5504		for (i = 0; i < num_str; i++) {
5505			uint16_t st;
5506
5507			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5508			    sizeof(struct sctp_strseq),
5509			    (uint8_t *) & strseqbuf);
5510			offset += sizeof(struct sctp_strseq);
5511			if (stseq == NULL) {
5512				break;
5513			}
			/* Convert to host byte order, in place */
5515			st = ntohs(stseq->stream);
5516			stseq->stream = st;
5517			st = ntohs(stseq->sequence);
5518			stseq->sequence = st;
5519
5520			/* now process */
5521
			/*
			 * OK, we now look for the stream/seq on the read
			 * queue where it is not all delivered. If we find
			 * it, we transmute the read entry into a
			 * PDI_ABORTED.
			 */
5527			if (stseq->stream >= asoc->streamincnt) {
5528				/* screwed up streams, stop!  */
5529				break;
5530			}
5531			if ((asoc->str_of_pdapi == stseq->stream) &&
5532			    (asoc->ssn_of_pdapi == stseq->sequence)) {
				/*
				 * If this is the one we are currently
				 * partially delivering, then we no longer
				 * are. Note: this will change with the
				 * reassembly re-write.
				 */
5539				asoc->fragmented_delivery_inprogress = 0;
5540			}
5541			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5542			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5543				if ((ctl->sinfo_stream == stseq->stream) &&
5544				    (ctl->sinfo_ssn == stseq->sequence)) {
5545					str_seq = (stseq->stream << 16) | stseq->sequence;
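					/*
					 * str_seq packs stream number and
					 * SSN into one 32-bit value for the
					 * notification, e.g. stream 3, SSN 7
					 * becomes 0x00030007.
					 */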
5546					ctl->end_added = 1;
5547					ctl->pdapi_aborted = 1;
5548					sv = stcb->asoc.control_pdapi;
5549					stcb->asoc.control_pdapi = ctl;
5550					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5551					    stcb,
5552					    SCTP_PARTIAL_DELIVERY_ABORTED,
5553					    (void *)&str_seq,
5554					    SCTP_SO_NOT_LOCKED);
5555					stcb->asoc.control_pdapi = sv;
5556					break;
5557				} else if ((ctl->sinfo_stream == stseq->stream) &&
5558				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5559					/* We are past our victim SSN */
5560					break;
5561				}
5562			}
5563			strm = &asoc->strmin[stseq->stream];
5564			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5565				/* Update the sequence number */
5566				strm->last_sequence_delivered = stseq->sequence;
5567			}
5568			/* now kick the stream the new way */
5569			/* sa_ignore NO_NULL_CHK */
5570			sctp_kick_prsctp_reorder_queue(stcb, strm);
5571		}
5572		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5573	}
	/*
	 * Now slide things forward.
	 */
5577	sctp_slide_mapping_arrays(stcb);
5578
5579	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* now let's kick out and check for more fragmented delivery */
5581		/* sa_ignore NO_NULL_CHK */
5582		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5583	}
5584}
5585