/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 166086 2007-01-18 09:58:43Z rrs $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


#ifdef SCTP_DEBUG
extern uint32_t sctp_debug_on;

#endif

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it), for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

extern int sctp_strict_sacks;

__inline void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc, calc_w_oh;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return;

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT yet been put on the socket queue but that
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		asoc->my_rwnd = 0;
		return;
	}
	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		asoc->my_rwnd = 1;
	} else {
		/* SWS threshold */
		if (asoc->my_rwnd &&
		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			asoc->my_rwnd = 1;
		}
	}
}

/* Calculate what the rwnd would be */

__inline uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0, calc_w_oh;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT yet been put on the socket queue but that
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		calc = 1;
	} else {
		/* SWS threshold */
		if (calc &&
		    (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			calc = 1;
		}
	}
	return (calc);
}
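
/*
 * Worked example for the two routines above (illustrative numbers only):
 * with sctp_sbspace() reporting 32000 bytes free, 4000 bytes sitting on
 * the reassembly queue and 2000 bytes on the stream queues, the window
 * works out to 32000 - 4000 - 2000 = 26000.  If my_rwnd_control_len were
 * to eat up all of that remainder, or the result fell below the
 * receiver-side SWS threshold (sctp_sws_receiver), we would advertise a
 * window of 1 instead.
 */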


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
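
/*
 * Note on the (flags << 8) above: the DATA chunk flags (e.g. the
 * unordered bit, SCTP_DATA_UNORDERED = 0x04) sit in the low byte of the
 * chunk header on the wire, while sinfo_flags reports them to the user
 * in its upper byte; the shift maps one onto the other, so an unordered
 * chunk shows up as 0x0400 in sinfo_flags.
 */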


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/*
		 * copy only the struct itself; len also counts the cmsg
		 * header and would overrun sinfo
		 */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
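
/*
 * For illustration only (never compiled): a userland receiver that has
 * subscribed to sctp_data_io_event (which turns on the
 * SCTP_PCB_FLAGS_RECVDATAIOEVNT feature tested above) would walk the
 * control data built here roughly like this.  The variable names are
 * hypothetical.
 */
#if 0
	struct msghdr msg;		/* filled in by recvmsg() */
	struct cmsghdr *scmh;
	struct sctp_sndrcvinfo rcvinfo;

	for (scmh = CMSG_FIRSTHDR(&msg); scmh != NULL;
	    scmh = CMSG_NXTHDR(&msg, scmh)) {
		if (scmh->cmsg_level == IPPROTO_SCTP &&
		    scmh->cmsg_type == SCTP_SNDRCV) {
			memcpy(&rcvinfo, CMSG_DATA(scmh), sizeof(rcvinfo));
			/* rcvinfo.sinfo_stream, sinfo_ssn, sinfo_tsn, ... */
		}
	}
#endif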


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_queued_to_read *control, *ctl, *ctlat;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	cntDel = stream_no = 0;
	if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * AND not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.control_pdapi == NULL) {
					panic("This should not happen control_pdapi NULL?");
				}
				if (stcb->asoc.control_pdapi->tail_mbuf == NULL) {
					panic("This should not happen, tail_mbuf not being maintained?");
				}
				/* if we did not panic, it was an EOM */
				panic("Bad chunking ??");
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_remote_addr(chk->whoTo);
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			uint16_t nxt_todel;
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		    (uint32_t) control->sinfo_stream,
		    (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
	}
#endif
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			    control->sinfo_ssn,
			    strm->last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(control, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(control, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					sctp_free_remote_addr(control->whoFrom);
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
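
/*
 * Illustration of the ordering logic above (hypothetical values): if
 * last_sequence_delivered is 4, an arriving SSN 5 is pushed straight to
 * the socket buffer, and any queued SSN 6, 7, ... follow it; an arriving
 * SSN 7 is instead inserted into the stream's inqueue in SSN order to
 * wait for 5 and 6.
 */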

/*
 * Returns two things: the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and a flag that is 1
 * if all of the message is ready or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
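
/*
 * Example (hypothetical TSNs): with fragments TSN 10 (FIRST), 11 (MIDDLE)
 * and 12 (LAST) at the head of the reassembly queue, *t_size becomes the
 * sum of the three send_sizes and we return 1.  If TSN 12 had not arrived
 * yet, we would return 0 with *t_size covering only TSNs 10 and 11.
 */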

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize > stcb->sctp_ep->partial_delivery_point))) {

				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to
		 * go back and call itself again... we do that here with
		 * the goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
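
/*
 * In other words, sctp_deliver_reasm_check() starts a partial-delivery
 * session either when the whole message is present or when the contiguous
 * deliverable bytes exceed the endpoint's partial_delivery_point.  For
 * example (hypothetical numbers), with a partial_delivery_point of 4096,
 * a 10000-byte message can begin being handed to the application as soon
 * as more than 4096 contiguous bytes of it have been queued.
 */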

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				}
#endif
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment, NOT a FIRST
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
#endif
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						    chk->rec.data.stream_number,
						    asoc->str_of_pdapi);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
					    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						    chk->rec.data.stream_seq,
						    asoc->ssn_of_pdapi);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - It can be a middle or last but not a first\n");
						printf("Gak, Evil plot, it's a FIRST!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    prev->rec.data.stream_number);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq
					 * here, they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    prev->rec.data.stream_seq);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is FIRST, we must be LAST\n");
						printf("Gak, Evil plot, its not a last!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST, NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is a MIDDLE/LAST\n");
						printf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    next->rec.data.stream_number);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq
					 * here, they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    next->rec.data.stream_seq);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;

				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but this is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
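
/*
 * Example (hypothetical TSNs): if the reassembly queue holds TSN 20
 * marked MIDDLE, an arriving self-contained chunk with TSN 21 is evil --
 * whatever sits next to a non-LAST fragment must itself be a MIDDLE or
 * LAST, so we return 1 and the caller can treat the peer as misbehaving.
 * If TSN 20 were instead a LAST fragment, a self-contained TSN 21 would
 * be fine and we return 0.
 */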


extern unsigned int sctp_max_chunks_on_queue;
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
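	/*
	 * Wraparound example (illustrative values only): with
	 * mapping_array_base_tsn = 0xfffffffe and tsn = 1, the else branch
	 * above gives gap = (0xffffffff - 0xfffffffe) + 1 + 1 = 3, i.e.
	 * this TSN occupies the slot three past the base of the map.
	 */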
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		if (sctp_expand_mapping_array(asoc)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		if (!SCTP_OS_TIMER_PENDING(&asoc->dack_timer.timer)) {
			/*
			 * By starting the timer we assure that we WILL sack
			 * at the end of the packet when sctp_sack_check
			 * gets called.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
			    stcb, NULL);
		}
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above
	 */
	if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn,
		    asoc->highest_tsn_inside_map, MAX_TSN)) {

			/* Nope, not in the valid range; dump it */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
				printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
				    (u_long)tsn, (u_long)asoc->my_rwnd,
				    sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));

			}
#endif
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just params and this one has
			 * two back-to-back phdrs, one with the error type
			 * and size, the other with the stream id and a
			 * rsvd field.
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		return (0);
	}
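	/*
	 * For reference: the error cause queued above is 8 bytes on the
	 * wire, an Invalid Stream Identifier cause header (type and
	 * length) immediately followed by the offending stream id (still
	 * in network byte order) and a 16-bit reserved field of zero.
	 */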
1608	/*
1609	 * Before we continue lets validate that we are not being fooled by
1610	 * an evil attacker. We can only have 4k chunks based on our TSN
1611	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1612	 * way our stream sequence numbers could have wrapped. We of course
1613	 * only validate the FIRST fragment so the bit must be set.
1614	 */
1615	strmseq = ntohs(ch->dp.stream_sequence);
1616	if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1617	    (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1618	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1619	    strmseq, MAX_SEQ) ||
1620	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1621		/* The incoming sseq is behind where we last delivered? */
1622#ifdef SCTP_DEBUG
1623		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1624			printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1625			    strmseq,
1626			    asoc->strmin[strmno].last_sequence_delivered);
1627		}
1628#endif
1629		/*
1630		 * throw it in the stream so it gets cleaned up in
1631		 * association destruction
1632		 */
1633		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1634		    0, M_DONTWAIT, 1, MT_DATA);
1635		if (oper) {
1636			struct sctp_paramhdr *ph;
1637			uint32_t *ippp;
1638
1639			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1640			    (3 * sizeof(uint32_t));
1641			ph = mtod(oper, struct sctp_paramhdr *);
1642			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1643			ph->param_length = htons(SCTP_BUF_LEN(oper));
1644			ippp = (uint32_t *) (ph + 1);
1645			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1646			ippp++;
1647			*ippp = tsn;
1648			ippp++;
1649			*ippp = ((strmno << 16) | strmseq);
1650
1651		}
1652		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1653		sctp_abort_an_association(stcb->sctp_ep, stcb,
1654		    SCTP_PEER_FAULTY, oper);
1655		*abort_flag = 1;
1656		return (0);
1657	}
1658	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1659	if (last_chunk == 0) {
1660		dmbuf = SCTP_M_COPYM(*m,
1661		    (offset + sizeof(struct sctp_data_chunk)),
1662		    the_len, M_DONTWAIT);
1663#ifdef SCTP_MBUF_LOGGING
1664		{
1665			struct mbuf *mat;
1666
1667			mat = dmbuf;
1668			while (mat) {
1669				if (SCTP_BUF_IS_EXTENDED(mat)) {
1670					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1671				}
1672				mat = SCTP_BUF_NEXT(mat);
1673			}
1674		}
1675#endif
1676	} else {
1677		/* We can steal the last chunk */
1678		int l_len;
1679
1680		dmbuf = *m;
1681		/* lop off the top part */
1682		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1683		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1684			l_len = SCTP_BUF_LEN(dmbuf);
1685		} else {
1686			/*
1687			 * need to count up the size; hopefully we do not hit
1688			 * this too often :-0
1689			 */
1690			struct mbuf *lat;
1691
1692			l_len = 0;
1693			lat = dmbuf;
1694			while (lat) {
1695				l_len += SCTP_BUF_LEN(lat);
1696				lat = SCTP_BUF_NEXT(lat);
1697			}
1698		}
1699		if (l_len > the_len) {
1700			/* Trim the rounding bytes off the end too */
1701			m_adj(dmbuf, -(l_len - the_len));
1702		}
1703	}
1704	if (dmbuf == NULL) {
1705		SCTP_STAT_INCR(sctps_nomem);
1706		return (0);
1707	}
1708	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1709	    asoc->fragmented_delivery_inprogress == 0 &&
1710	    TAILQ_EMPTY(&asoc->resetHead) &&
1711	    ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1712	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1713	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1714		/* Candidate for express delivery */
1715		/*
1716		 * It's not fragmented, no PD-API is up, nothing is in the
1717		 * delivery queue, it's un-ordered OR ordered and the next to
1718		 * deliver AND nothing else is stuck on the stream queue,
1719		 * and there is room for it in the socket buffer. Let's just
1720		 * stuff it up the buffer....
1721		 */
1722
1723		/* It would be nice to avoid this copy if we could :< */
1724		sctp_alloc_a_readq(stcb, control);
1725		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1726		    ch->dp.protocol_id,
1727		    stcb->asoc.context,
1728		    strmno, strmseq,
1729		    ch->ch.chunk_flags,
1730		    dmbuf);
1731		if (control == NULL) {
1732			goto failed_express_del;
1733		}
1734		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1);
1735		if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1736			/* for ordered, bump what we delivered */
1737			asoc->strmin[strmno].last_sequence_delivered++;
1738		}
1739		SCTP_STAT_INCR(sctps_recvexpress);
1740#ifdef SCTP_STR_LOGGING
1741		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1742		    SCTP_STR_LOG_FROM_EXPRS_DEL);
1743#endif
1744		control = NULL;
1745		goto finish_express_del;
1746	}
1747failed_express_del:
1748	/* If we reach here this is a new chunk */
1749	chk = NULL;
1750	control = NULL;
1751	/* Express for fragmented delivery? */
1752	if ((asoc->fragmented_delivery_inprogress) &&
1753	    (stcb->asoc.control_pdapi) &&
1754	    (asoc->str_of_pdapi == strmno) &&
1755	    (asoc->ssn_of_pdapi == strmseq)
1756	    ) {
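		/*
		 * A partial delivery (PD-API) is already in progress on this
		 * very stream/ssn; if this chunk is the next TSN of that
		 * message we can append it directly to the read-queue entry
		 * the application is already consuming.
		 */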
1757		control = stcb->asoc.control_pdapi;
1758		if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1759			/* Can't be another first? */
1760			goto failed_pdapi_express_del;
1761		}
1762		if (tsn == (control->sinfo_tsn + 1)) {
1763			/* Yep, we can add it on */
1764			int end = 0;
1765			uint32_t cumack;
1766
1767			if (ch->ch.chunk_flags & SCTP_DATA_LAST_FRAG) {
1768				end = 1;
1769			}
1770			cumack = asoc->cumulative_tsn;
1771			if ((cumack + 1) == tsn)
1772				cumack = tsn;
1773
1774			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1775			    tsn,
1776			    &stcb->sctp_socket->so_rcv)) {
1777				printf("Append fails end:%d\n", end);
1778				goto failed_pdapi_express_del;
1779			}
1780			SCTP_STAT_INCR(sctps_recvexpressm);
1781			control->sinfo_tsn = tsn;
1782			asoc->tsn_last_delivered = tsn;
1783			asoc->fragment_flags = ch->ch.chunk_flags;
1784			asoc->tsn_of_pdapi_last_delivered = tsn;
1785			asoc->last_flags_delivered = ch->ch.chunk_flags;
1786			asoc->last_strm_seq_delivered = strmseq;
1787			asoc->last_strm_no_delivered = strmno;
1788			if (end) {
1789				/* clean up the flags and such */
1790				asoc->fragmented_delivery_inprogress = 0;
1791				if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1792					asoc->strmin[strmno].last_sequence_delivered++;
1793				}
1794				stcb->asoc.control_pdapi = NULL;
1795				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1796					/*
1797					 * There could be another message
1798					 * ready
1799					 */
1800					need_reasm_check = 1;
1801				}
1802			}
1803			control = NULL;
1804			goto finish_express_del;
1805		}
1806	}
1807failed_pdapi_express_del:
1808	control = NULL;
1809	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1810		sctp_alloc_a_chunk(stcb, chk);
1811		if (chk == NULL) {
1812			/* No memory so we drop the chunk */
1813			SCTP_STAT_INCR(sctps_nomem);
1814			if (last_chunk == 0) {
1815				/* we copied it, free the copy */
1816				sctp_m_freem(dmbuf);
1817			}
1818			return (0);
1819		}
1820		chk->rec.data.TSN_seq = tsn;
1821		chk->no_fr_allowed = 0;
1822		chk->rec.data.stream_seq = strmseq;
1823		chk->rec.data.stream_number = strmno;
1824		chk->rec.data.payloadtype = ch->dp.protocol_id;
1825		chk->rec.data.context = stcb->asoc.context;
1826		chk->rec.data.doing_fast_retransmit = 0;
1827		chk->rec.data.rcv_flags = ch->ch.chunk_flags;
1828		chk->asoc = asoc;
1829		chk->send_size = the_len;
1830		chk->whoTo = net;
1831		atomic_add_int(&net->ref_count, 1);
1832		chk->data = dmbuf;
1833	} else {
1834		sctp_alloc_a_readq(stcb, control);
1835		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1836		    ch->dp.protocol_id,
1837		    stcb->asoc.context,
1838		    strmno, strmseq,
1839		    ch->ch.chunk_flags,
1840		    dmbuf);
1841		if (control == NULL) {
1842			/* No memory so we drop the chunk */
1843			SCTP_STAT_INCR(sctps_nomem);
1844			if (last_chunk == 0) {
1845				/* we copied it, free the copy */
1846				sctp_m_freem(dmbuf);
1847			}
1848			return (0);
1849		}
1850		control->length = the_len;
1851	}
1852
1853	/* Mark it as received */
1854	/* Now queue it where it belongs */
1855	if (control != NULL) {
1856		/* First a sanity check */
1857		if (asoc->fragmented_delivery_inprogress) {
1858			/*
1859			 * Ok, we have a fragmented delivery in progress; if
1860			 * this chunk is next to deliver OR belongs, in our
1861			 * view, to the reassembly, then the peer is evil or
1862			 * broken.
1863			 */
1864			uint32_t estimate_tsn;
1865
1866			estimate_tsn = asoc->tsn_last_delivered + 1;
1867			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1868			    (estimate_tsn == control->sinfo_tsn)) {
1869				/* Evil/Broken peer */
1870				sctp_m_freem(control->data);
1871				control->data = NULL;
1872				sctp_free_remote_addr(control->whoFrom);
1873				sctp_free_a_readq(stcb, control);
1874				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1875				    0, M_DONTWAIT, 1, MT_DATA);
1876				if (oper) {
1877					struct sctp_paramhdr *ph;
1878					uint32_t *ippp;
1879
1880					SCTP_BUF_LEN(oper) =
1881					    sizeof(struct sctp_paramhdr) +
1882					    (3 * sizeof(uint32_t));
1883					ph = mtod(oper, struct sctp_paramhdr *);
1884					ph->param_type =
1885					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1886					ph->param_length = htons(SCTP_BUF_LEN(oper));
1887					ippp = (uint32_t *) (ph + 1);
1888					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1889					ippp++;
1890					*ippp = tsn;
1891					ippp++;
1892					*ippp = ((strmno << 16) | strmseq);
1893				}
1894				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1895				sctp_abort_an_association(stcb->sctp_ep, stcb,
1896				    SCTP_PEER_FAULTY, oper);
1897
1898				*abort_flag = 1;
1899				return (0);
1900			} else {
1901				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1902					sctp_m_freem(control->data);
1903					control->data = NULL;
1904					sctp_free_remote_addr(control->whoFrom);
1905					sctp_free_a_readq(stcb, control);
1906
1907					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1908					    0, M_DONTWAIT, 1, MT_DATA);
1909					if (oper) {
1910						struct sctp_paramhdr *ph;
1911						uint32_t *ippp;
1912
1913						SCTP_BUF_LEN(oper) =
1914						    sizeof(struct sctp_paramhdr) +
1915						    (3 * sizeof(uint32_t));
1916						ph = mtod(oper,
1917						    struct sctp_paramhdr *);
1918						ph->param_type =
1919						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1920						ph->param_length =
1921						    htons(SCTP_BUF_LEN(oper));
1922						ippp = (uint32_t *) (ph + 1);
1923						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1924						ippp++;
1925						*ippp = tsn;
1926						ippp++;
1927						*ippp = ((strmno << 16) | strmseq);
1928					}
1929					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1930					sctp_abort_an_association(stcb->sctp_ep,
1931					    stcb, SCTP_PEER_FAULTY, oper);
1932
1933					*abort_flag = 1;
1934					return (0);
1935				}
1936			}
1937		} else {
1938			/* No PDAPI running */
1939			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1940				/*
1941				 * The reassembly queue is NOT empty; validate
1942				 * that this tsn does not need to be in the
1943				 * reassembly queue. If it does then our peer
1944				 * is broken or evil.
1945				 */
1946				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1947					sctp_m_freem(control->data);
1948					control->data = NULL;
1949					sctp_free_remote_addr(control->whoFrom);
1950					sctp_free_a_readq(stcb, control);
1951					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1952					    0, M_DONTWAIT, 1, MT_DATA);
1953					if (oper) {
1954						struct sctp_paramhdr *ph;
1955						uint32_t *ippp;
1956
1957						SCTP_BUF_LEN(oper) =
1958						    sizeof(struct sctp_paramhdr) +
1959						    (3 * sizeof(uint32_t));
1960						ph = mtod(oper,
1961						    struct sctp_paramhdr *);
1962						ph->param_type =
1963						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1964						ph->param_length =
1965						    htons(SCTP_BUF_LEN(oper));
1966						ippp = (uint32_t *) (ph + 1);
1967						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1968						ippp++;
1969						*ippp = tsn;
1970						ippp++;
1971						*ippp = ((strmno << 16) | strmseq);
1972					}
1973					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1974					sctp_abort_an_association(stcb->sctp_ep,
1975					    stcb, SCTP_PEER_FAULTY, oper);
1976
1977					*abort_flag = 1;
1978					return (0);
1979				}
1980			}
1981		}
1982		/* ok, if we reach here we have passed the sanity checks */
1983		if (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) {
1984			/* queue directly into socket buffer */
1985			sctp_add_to_readq(stcb->sctp_ep, stcb,
1986			    control,
1987			    &stcb->sctp_socket->so_rcv, 1);
1988		} else {
1989			/*
1990			 * Special check for when streams are resetting. We
1991			 * could be smarter about this and check the
1992			 * actual stream to see if it is not being reset..
1993			 * that way we would not create head-of-line blocking
1994			 * amongst streams being reset and those not being reset.
1995			 *
1996			 * We take complete messages that have a stream reset
1997			 * intervening (aka the TSN is after where our
1998			 * cum-ack needs to be) off and put them on a
1999			 * pending_reply_queue. The reassembly ones we do
2000			 * not have to worry about since they are all sorted
2001			 * and processed by TSN order. It is only the
2002			 * singletons I must worry about.
2003			 */
2004			struct sctp_stream_reset_list *liste;
2005
2006			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2007			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) ||
2008			    (tsn == liste->tsn))	/* liste->tsn is host order */
2009			    ) {
2010				/*
2011				 * yep its past where we need to reset... go
2012				 * ahead and queue it.
2013				 */
2014				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2015					/* first one on */
2016					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2017				} else {
2018					struct sctp_queued_to_read *ctlOn;
2019					unsigned char inserted = 0;
2020
2021					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2022					while (ctlOn) {
2023						if (compare_with_wrap(control->sinfo_tsn,
2024						    ctlOn->sinfo_tsn, MAX_TSN)) {
2025							ctlOn = TAILQ_NEXT(ctlOn, next);
2026						} else {
2027							/* found it */
2028							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2029							inserted = 1;
2030							break;
2031						}
2032					}
2033					if (inserted == 0) {
2034						/*
2035					 * not inserted before any
2036					 * existing entry, so it
2037					 * goes at the end.
2038						 */
2039						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2040					}
2041				}
2042			} else {
2043				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2044				if (*abort_flag) {
2045					return (0);
2046				}
2047			}
2048		}
2049	} else {
2050		/* Into the re-assembly queue */
2051		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2052		if (*abort_flag) {
2053			/*
2054			 * the assoc is now gone and chk was put onto the
2055			 * reasm queue, which has all been freed.
2056			 */
2057			*m = NULL;
2058			return (0);
2059		}
2060	}
2061finish_express_del:
2062	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2063		/* we have a new high score */
2064		asoc->highest_tsn_inside_map = tsn;
2065#ifdef SCTP_MAP_LOGGING
2066		sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2067#endif
2068	}
2069	if (tsn == (asoc->cumulative_tsn + 1)) {
2070		/* Update cum-ack */
2071		asoc->cumulative_tsn = tsn;
2072	}
2073	if (last_chunk) {
2074		*m = NULL;
2075	}
2076	if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2077		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2078	} else {
2079		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2080	}
2081	SCTP_STAT_INCR(sctps_recvdata);
2082	/* Set it present please */
2083#ifdef SCTP_STR_LOGGING
2084	sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2085#endif
2086#ifdef SCTP_MAP_LOGGING
2087	sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2088	    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2089#endif
2090	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2091	if (need_reasm_check) {
2092		/* Another one waits ? */
2093		sctp_deliver_reasm_check(stcb, asoc);
2094	}
2095	return (1);
2096}
2097
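/*
 * Lookup table for scanning the mapping array: for each byte value the
 * entry is the number of consecutive 1 bits starting at bit 0, minus
 * one (0x00 -> -1, 0x01 -> 0, 0x03 -> 1, ..., 0xff -> 7). The -1 bias
 * is corrected by the at++ in sctp_sack_check() below.
 */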
2098int8_t sctp_map_lookup_tab[256] = {
2099	-1, 0, -1, 1, -1, 0, -1, 2,
2100	-1, 0, -1, 1, -1, 0, -1, 3,
2101	-1, 0, -1, 1, -1, 0, -1, 2,
2102	-1, 0, -1, 1, -1, 0, -1, 4,
2103	-1, 0, -1, 1, -1, 0, -1, 2,
2104	-1, 0, -1, 1, -1, 0, -1, 3,
2105	-1, 0, -1, 1, -1, 0, -1, 2,
2106	-1, 0, -1, 1, -1, 0, -1, 5,
2107	-1, 0, -1, 1, -1, 0, -1, 2,
2108	-1, 0, -1, 1, -1, 0, -1, 3,
2109	-1, 0, -1, 1, -1, 0, -1, 2,
2110	-1, 0, -1, 1, -1, 0, -1, 4,
2111	-1, 0, -1, 1, -1, 0, -1, 2,
2112	-1, 0, -1, 1, -1, 0, -1, 3,
2113	-1, 0, -1, 1, -1, 0, -1, 2,
2114	-1, 0, -1, 1, -1, 0, -1, 6,
2115	-1, 0, -1, 1, -1, 0, -1, 2,
2116	-1, 0, -1, 1, -1, 0, -1, 3,
2117	-1, 0, -1, 1, -1, 0, -1, 2,
2118	-1, 0, -1, 1, -1, 0, -1, 4,
2119	-1, 0, -1, 1, -1, 0, -1, 2,
2120	-1, 0, -1, 1, -1, 0, -1, 3,
2121	-1, 0, -1, 1, -1, 0, -1, 2,
2122	-1, 0, -1, 1, -1, 0, -1, 5,
2123	-1, 0, -1, 1, -1, 0, -1, 2,
2124	-1, 0, -1, 1, -1, 0, -1, 3,
2125	-1, 0, -1, 1, -1, 0, -1, 2,
2126	-1, 0, -1, 1, -1, 0, -1, 4,
2127	-1, 0, -1, 1, -1, 0, -1, 2,
2128	-1, 0, -1, 1, -1, 0, -1, 3,
2129	-1, 0, -1, 1, -1, 0, -1, 2,
2130	-1, 0, -1, 1, -1, 0, -1, 7,
2131};
2132
2133
2134void
2135sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2136{
2137	/*
2138	 * Now we also need to check the mapping array in a couple of ways.
2139	 * 1) Did we move the cum-ack point? 2) Can we slide the map down?
2140	 */
2141	struct sctp_association *asoc;
2142	int i, at;
2143	int all_ones, last_all_ones = 0;
2144	int slide_from, slide_end, lgap, distance;
2145
2146#ifdef SCTP_MAP_LOGGING
2147	uint32_t old_cumack, old_base, old_highest;
2148	unsigned char aux_array[64];
2149
2150#endif
2151	struct sctp_stream_reset_list *liste;
2152
2153	asoc = &stcb->asoc;
2154	at = 0;
2155
2156#ifdef SCTP_MAP_LOGGING
2157	old_cumack = asoc->cumulative_tsn;
2158	old_base = asoc->mapping_array_base_tsn;
2159	old_highest = asoc->highest_tsn_inside_map;
2160	if (asoc->mapping_array_size < 64)
2161		memcpy(aux_array, asoc->mapping_array,
2162		    asoc->mapping_array_size);
2163	else
2164		memcpy(aux_array, asoc->mapping_array, 64);
2165#endif
2166
2167	/*
2168	 * We could probably improve this a small bit by calculating the
2169	 * offset of the current cum-ack as the starting point.
2170	 */
2171	all_ones = 1;
2172	at = 0;
2173	for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
2174		if (asoc->mapping_array[i] == 0xff) {
2175			at += 8;
2176			last_all_ones = 1;
2177		} else {
2178			/* there is a 0 bit */
2179			all_ones = 0;
2180			at += sctp_map_lookup_tab[asoc->mapping_array[i]];
2181			last_all_ones = 0;
2182			break;
2183		}
2184	}
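	/*
	 * "at" is now the bit index of the last TSN in the run of
	 * consecutive 1 bits that starts at the base of the map (or -1 if
	 * the very first bit is clear); when the scan ended on an all-ones
	 * byte, last_all_ones compensates for "at" pointing one past the
	 * run. E.g. mapping_array[0] == 0x3f gives at = 5, so the cum-ack
	 * is base + 5.
	 */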
2185	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2186	/* at is one off, since an embedded -1 is present in the table */
2187	at++;
2188
2189	if (compare_with_wrap(asoc->cumulative_tsn,
2190	    asoc->highest_tsn_inside_map,
2191	    MAX_TSN)) {
2192#ifdef INVARIANTS
2193		panic("huh, cumack greater than high-tsn in map");
2194#else
2195		printf("huh, cumack greater than high-tsn in map - should panic?\n");
2196		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2197#endif
2198	}
2199	if (all_ones ||
2200	    (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2201		/* The complete array was completed by a single FR */
2202		/* highest becomes the cum-ack */
2203		int clr;
2204
2205		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2206		/* clear the array */
2207		if (all_ones)
2208			clr = asoc->mapping_array_size;
2209		else {
2210			clr = (at >> 3) + 1;
2211			/*
2212			 * this should be the all-ones case but just in case
2213			 * :>
2214			 */
2215			if (clr > asoc->mapping_array_size)
2216				clr = asoc->mapping_array_size;
2217		}
2218		memset(asoc->mapping_array, 0, clr);
2219		/* base becomes one ahead of the cum-ack */
2220		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2221#ifdef SCTP_MAP_LOGGING
2222		sctp_log_map(old_base, old_cumack, old_highest,
2223		    SCTP_MAP_PREPARE_SLIDE);
2224		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2225		    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2226#endif
2227	} else if (at >= 8) {
2228		/* we can slide the mapping array down */
2229		/* Calculate the new byte position we can move down */
2230		slide_from = at >> 3;
2231		/*
2232		 * now calculate the ceiling of the move using our highest
2233		 * TSN value
2234		 */
2235		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2236			lgap = asoc->highest_tsn_inside_map -
2237			    asoc->mapping_array_base_tsn;
2238		} else {
2239			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2240			    asoc->highest_tsn_inside_map + 1;
2241		}
2242		slide_end = lgap >> 3;
2243		if (slide_end < slide_from) {
2244			panic("impossible slide");
2245		}
2246		distance = (slide_end - slide_from) + 1;
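		/*
		 * Example: with at = 19 the first two map bytes are fully
		 * acked, so slide_from = 2; if the highest TSN lives in
		 * byte 5 (slide_end = 5) we copy bytes 2..5 down to 0..3
		 * (distance = 4), zero the rest, and advance the base TSN
		 * by 16 (slide_from << 3).
		 */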
2247#ifdef SCTP_MAP_LOGGING
2248		sctp_log_map(old_base, old_cumack, old_highest,
2249		    SCTP_MAP_PREPARE_SLIDE);
2250		sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2251		    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2252#endif
2253		if (distance + slide_from > asoc->mapping_array_size ||
2254		    distance < 0) {
2255			/*
2256			 * Here we do NOT slide forward the array so that
2257			 * hopefully when more data comes in to fill it up
2258			 * we will be able to slide it forward. Really I
2259			 * don't think this should happen :-0
2260			 */
2261
2262#ifdef SCTP_MAP_LOGGING
2263			sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2264			    (uint32_t) asoc->mapping_array_size,
2265			    SCTP_MAP_SLIDE_NONE);
2266#endif
2267		} else {
2268			int ii;
2269
2270			for (ii = 0; ii < distance; ii++) {
2271				asoc->mapping_array[ii] =
2272				    asoc->mapping_array[slide_from + ii];
2273			}
2274			for (ii = distance; ii <= slide_end; ii++) {
2275				asoc->mapping_array[ii] = 0;
2276			}
2277			asoc->mapping_array_base_tsn += (slide_from << 3);
2278#ifdef SCTP_MAP_LOGGING
2279			sctp_log_map(asoc->mapping_array_base_tsn,
2280			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2281			    SCTP_MAP_SLIDE_RESULT);
2282#endif
2283		}
2284	}
2285	/* check the special flag for stream resets */
2286	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2287	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2288	    (asoc->cumulative_tsn == liste->tsn))
2289	    ) {
2290		/*
2291		 * we have finished working through the backlogged TSNs; now it
2292		 * is time to reset the streams. 1: call the reset function. 2: free
2293		 * the pending_reply space. 3: distribute any chunks in the
2294		 * pending_reply_queue.
2295		 */
2296		struct sctp_queued_to_read *ctl;
2297
2298		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2299		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2300		SCTP_FREE(liste);
2301		liste = TAILQ_FIRST(&asoc->resetHead);
2302		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2303		if (ctl && (liste == NULL)) {
2304			/* All can be removed */
2305			while (ctl) {
2306				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2307				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2308				if (*abort_flag) {
2309					return;
2310				}
2311				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2312			}
2313		} else if (ctl) {
2314			/* more than one in queue */
2315			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2316				/*
2317				 * if ctl->sinfo_tsn is <= liste->tsn we can
2318				 * process it which is the NOT of
2319				 * ctl->sinfo_tsn > liste->tsn
2320				 */
2321				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2322				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2323				if (*abort_flag) {
2324					return;
2325				}
2326				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2327			}
2328		}
2329		/*
2330		 * Now service re-assembly to pick up anything that has been
2331		 * held on the reassembly queue.
2332		 */
2333		sctp_deliver_reasm_check(stcb, asoc);
2334	}
2335	/*
2336	 * Now we need to see if we need to queue a sack or just start the
2337	 * timer (if allowed).
2338	 */
2339	if (ok_to_sack) {
2340		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2341			/*
2342			 * Ok, special case for the SHUTDOWN-SENT state: here we
2343			 * make sure the SACK timer is off and instead send a
2344			 * SHUTDOWN and a SACK
2345			 */
2346			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2347				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2348				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2349			}
2350			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2351			sctp_send_sack(stcb);
2352		} else {
2353			int is_a_gap;
2354
2355			/* is there a gap now ? */
2356			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2357			    stcb->asoc.cumulative_tsn, MAX_TSN);
2358
2359			/*
2360			 * CMT DAC algorithm: increase number of packets
2361			 * received since last ack
2362			 */
2363			stcb->asoc.cmt_dac_pkts_rcvd++;
2364
2365			if ((stcb->asoc.first_ack_sent == 0) ||	/* First time we send a
2366								 * sack */
2367			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2368								 * longer is one */
2369			    (stcb->asoc.numduptsns) ||	/* we have dup's */
2370			    (is_a_gap) ||	/* is still a gap */
2371			    (stcb->asoc.delayed_ack == 0) ||
2372			    (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))	/* timer was up; second
2373											 * packet */
2374			    ) {
2375
2376				if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
2377				    (stcb->asoc.first_ack_sent == 1) &&
2378				    (stcb->asoc.numduptsns == 0) &&
2379				    (stcb->asoc.delayed_ack) &&
2380				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2381
2382					/*
2383					 * CMT DAC algorithm: With CMT,
2384					 * delay acks even in the face of
2385					 * reordering. Therefore, acks that
2386					 * do not have to be sent because of
2387					 * the above reasons will be
2388					 * delayed. That is, acks that would
2389					 * have been sent due to gap reports
2390					 * will be delayed with DAC. Start
2391					 * the delayed ack timer.
2393					 */
2394					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2395					    stcb->sctp_ep, stcb, NULL);
2396				} else {
2397					/*
2398					 * Ok we must build a SACK since the
2399					 * timer is pending, we got our
2400					 * first packet OR there are gaps or
2401					 * duplicates.
2402					 */
2403					stcb->asoc.first_ack_sent = 1;
2404
2405					sctp_send_sack(stcb);
2406					/* The sending will stop the timer */
2407				}
2408			} else {
2409				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2410				    stcb->sctp_ep, stcb, NULL);
2411			}
2412		}
2413	}
2414}
2415
2416void
2417sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2418{
2419	struct sctp_tmit_chunk *chk;
2420	uint32_t tsize;
2421	uint16_t nxt_todel;
2422
2423	if (asoc->fragmented_delivery_inprogress) {
2424		sctp_service_reassembly(stcb, asoc);
2425	}
2426	/* Can we proceed further, i.e. the PD-API is complete */
2427	if (asoc->fragmented_delivery_inprogress) {
2428		/* no */
2429		return;
2430	}
2431	/*
2432	 * Now is there some other chunk I can deliver from the reassembly
2433	 * queue.
2434	 */
2435doit_again:
2436	chk = TAILQ_FIRST(&asoc->reasmqueue);
2437	if (chk == NULL) {
2438		asoc->size_on_reasm_queue = 0;
2439		asoc->cnt_on_reasm_queue = 0;
2440		return;
2441	}
2442	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
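	/* nxt_todel is the next stream sequence number due on this stream */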
2443	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2444	    ((nxt_todel == chk->rec.data.stream_seq) ||
2445	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2446		/*
2447		 * Yep the first one is here. We setup to start reception,
2448		 * by backing down the TSN just in case we can't deliver.
2449		 */
2450
2451		/*
2452		 * Before we start though, either all of the message should
2453		 * be here or enough of it to exceed the endpoint's partial
2454		 * delivery point must have been reassembled.
2455		 */
2456		if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2457		    (tsize > stcb->sctp_ep->partial_delivery_point))) {
2458			asoc->fragmented_delivery_inprogress = 1;
2459			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2460			asoc->str_of_pdapi = chk->rec.data.stream_number;
2461			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2462			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2463			asoc->fragment_flags = chk->rec.data.rcv_flags;
2464			sctp_service_reassembly(stcb, asoc);
2465			if (asoc->fragmented_delivery_inprogress == 0) {
2466				goto doit_again;
2467			}
2468		}
2469	}
2470}
2471
2472extern int sctp_strict_data_order;
2473
2474int
2475sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2476    struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2477    struct sctp_nets *net, uint32_t * high_tsn)
2478{
2479	struct sctp_data_chunk *ch, chunk_buf;
2480	struct sctp_association *asoc;
2481	int num_chunks = 0;	/* number of control chunks processed */
2482	int stop_proc = 0;
2483	int chk_length, break_flag, last_chunk;
2484	int abort_flag = 0, was_a_gap = 0;
2485	struct mbuf *m;
2486
2487	/* set the rwnd */
2488	sctp_set_rwnd(stcb, &stcb->asoc);
2489
2490	m = *mm;
2491	SCTP_TCB_LOCK_ASSERT(stcb);
2492	asoc = &stcb->asoc;
2493	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
2494	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
2495	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
2496		/*
2497		 * wait a minute, this guy is gone, there is no longer a
2498		 * receiver. Send peer an ABORT!
2499		 */
2500		struct mbuf *op_err;
2501
2502		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2503		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
2504		return (2);
2505	}
2506	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2507	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2508		/* there was a gap before this data was processed */
2509		was_a_gap = 1;
2510	}
2511	/*
2512	 * setup where we got the last DATA packet from for any SACK that
2513	 * may need to go out. Don't bump the net. This is done ONLY when a
2514	 * chunk is assigned.
2515	 */
2516	asoc->last_data_chunk_from = net;
2517
2518	/*
2519	 * Now before we proceed we must figure out if this is a wasted
2520	 * cluster... i.e. it is a small packet sent in and yet the driver
2521	 * underneath allocated a full cluster for it. If so we must copy it
2522	 * to a smaller mbuf and free up the cluster mbuf. This will help
2523	 * with cluster starvation.
2524	 */
2525	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2526		/* we only handle mbufs that are singletons.. not chains */
2527		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2528		if (m) {
2529			/* ok, let's see if we can copy the data up */
2530			caddr_t *from, *to;
2531
2532			/* get the pointers and copy */
2533			to = mtod(m, caddr_t *);
2534			from = mtod((*mm), caddr_t *);
2535			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2536			/* copy the length and free up the old */
2537			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2538			sctp_m_freem(*mm);
2539			/* success, back copy */
2540			*mm = m;
2541		} else {
2542			/* We are in trouble in the mbuf world .. yikes */
2543			m = *mm;
2544		}
2545	}
2546	/* get pointer to the first chunk header */
2547	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2548	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2549	if (ch == NULL) {
2550		return (1);
2551	}
2552	/*
2553	 * process all DATA chunks...
2554	 */
2555	*high_tsn = asoc->cumulative_tsn;
2556	break_flag = 0;
2557	while (stop_proc == 0) {
2558		/* validate chunk length */
2559		chk_length = ntohs(ch->ch.chunk_length);
2560		if (length - *offset < chk_length) {
2561			/* all done, mutilated chunk */
2562			stop_proc = 1;
2563			break;
2564		}
2565		if (ch->ch.chunk_type == SCTP_DATA) {
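			/* a DATA chunk must carry at least one byte of user data */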
2566			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2567				/*
2568				 * Need to send an abort since we had an
2569				 * invalid data chunk.
2570				 */
2571				struct mbuf *op_err;
2572
2573				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2574				    0, M_DONTWAIT, 1, MT_DATA);
2575
2576				if (op_err) {
2577					struct sctp_paramhdr *ph;
2578					uint32_t *ippp;
2579
2580					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2581					    (2 * sizeof(uint32_t));
2582					ph = mtod(op_err, struct sctp_paramhdr *);
2583					ph->param_type =
2584					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2585					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2586					ippp = (uint32_t *) (ph + 1);
2587					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2588					ippp++;
2589					*ippp = asoc->cumulative_tsn;
2590
2591				}
2592				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2593				sctp_abort_association(inp, stcb, m, iphlen, sh,
2594				    op_err);
2595				return (2);
2596			}
2597#ifdef SCTP_AUDITING_ENABLED
2598			sctp_audit_log(0xB1, 0);
2599#endif
2600			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2601				last_chunk = 1;
2602			} else {
2603				last_chunk = 0;
2604			}
2605			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2606			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2607			    last_chunk)) {
2608				num_chunks++;
2609			}
2610			if (abort_flag)
2611				return (2);
2612
2613			if (break_flag) {
2614				/*
2615				 * Set because of out of rwnd space and no
2616				 * drop rep space left.
2617				 */
2618				stop_proc = 1;
2619				break;
2620			}
2621		} else {
2622			/* not a data chunk in the data region */
2623			switch (ch->ch.chunk_type) {
2624			case SCTP_INITIATION:
2625			case SCTP_INITIATION_ACK:
2626			case SCTP_SELECTIVE_ACK:
2627			case SCTP_HEARTBEAT_REQUEST:
2628			case SCTP_HEARTBEAT_ACK:
2629			case SCTP_ABORT_ASSOCIATION:
2630			case SCTP_SHUTDOWN:
2631			case SCTP_SHUTDOWN_ACK:
2632			case SCTP_OPERATION_ERROR:
2633			case SCTP_COOKIE_ECHO:
2634			case SCTP_COOKIE_ACK:
2635			case SCTP_ECN_ECHO:
2636			case SCTP_ECN_CWR:
2637			case SCTP_SHUTDOWN_COMPLETE:
2638			case SCTP_AUTHENTICATION:
2639			case SCTP_ASCONF_ACK:
2640			case SCTP_PACKET_DROPPED:
2641			case SCTP_STREAM_RESET:
2642			case SCTP_FORWARD_CUM_TSN:
2643			case SCTP_ASCONF:
2644				/*
2645				 * Now, what do we do with KNOWN chunks that
2646				 * are NOT in the right place?
2647				 *
2648				 * For now, I do nothing but ignore them. We
2649				 * may later want to add sysctl stuff to
2650				 * switch out and do either an ABORT() or
2651				 * possibly process them.
2652				 */
2653				if (sctp_strict_data_order) {
2654					struct mbuf *op_err;
2655
2656					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2657					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
2658					return (2);
2659				}
2660				break;
2661			default:
2662				/* unknown chunk type, use bit rules */
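				/*
				 * The two high-order bits of an unrecognized
				 * chunk type encode its handling (RFC 2960,
				 * section 3.2): 0x40 set means report it in
				 * an operational error, 0x80 set means skip
				 * it and keep processing the packet; with
				 * 0x80 clear we stop here.
				 */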
2663				if (ch->ch.chunk_type & 0x40) {
2664					/* Add a error report to the queue */
2665					struct mbuf *mm;
2666					struct sctp_paramhdr *phd;
2667
2668					mm = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2669					if (mm) {
2670						phd = mtod(mm, struct sctp_paramhdr *);
2671						/*
2672						 * We cheat and use param
2673						 * type since we did not
2674						 * bother to define an error
2675						 * cause struct. They are
2676						 * the same basic format
2677						 * with different names.
2678						 */
2679						phd->param_type =
2680						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2681						phd->param_length =
2682						    htons(chk_length + sizeof(*phd));
2683						SCTP_BUF_LEN(mm) = sizeof(*phd);
2684						SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset,
2685						    SCTP_SIZE32(chk_length),
2686						    M_DONTWAIT);
2687						if (SCTP_BUF_NEXT(mm)) {
2688							sctp_queue_op_err(stcb, mm);
2689						} else {
2690							sctp_m_freem(mm);
2691						}
2692					}
2693				}
2694				if ((ch->ch.chunk_type & 0x80) == 0) {
2695					/* discard the rest of this packet */
2696					stop_proc = 1;
2697				}	/* else skip this bad chunk and
2698					 * continue... */
2699				break;
2700			}	/* switch of chunk type */
2701		}
2702		*offset += SCTP_SIZE32(chk_length);
2703		if ((*offset >= length) || stop_proc) {
2704			/* no more data left in the mbuf chain */
2705			stop_proc = 1;
2706			continue;
2707		}
2708		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2709		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2710		if (ch == NULL) {
2711			*offset = length;
2712			stop_proc = 1;
2713			break;
2714
2715		}
2716	}			/* while */
2717	if (break_flag) {
2718		/*
2719		 * we need to report rwnd overrun drops.
2720		 */
2721		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2722	}
2723	if (num_chunks) {
2724		/*
2725		 * Did we get data, if so update the time for auto-close and
2726		 * give peer credit for being alive.
2727		 */
2728		SCTP_STAT_INCR(sctps_recvpktwithdata);
2729		stcb->asoc.overall_error_count = 0;
2730		SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2731	}
2732	/* now service all of the reassm queue if needed */
2733	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2734		sctp_service_queues(stcb, asoc);
2735
2736	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2737		/*
2738		 * Assure that we ack right away by making sure that a d-ack
2739		 * timer is running. So the sack_check will send a sack.
2740		 */
2741		sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2742		    net);
2743	}
2744	/* Start a sack timer or QUEUE a SACK for sending */
2745	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
2746	    (stcb->asoc.first_ack_sent)) {
2747		/* Everything is in order */
2748		if (stcb->asoc.mapping_array[0] == 0xff) {
2749			/* need to do the slide */
2750			sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2751		} else {
2752			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2753				stcb->asoc.first_ack_sent = 1;
2754				SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2755				sctp_send_sack(stcb);
2756			} else {
2757				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2758				    stcb->sctp_ep, stcb, NULL);
2759			}
2760		}
2761	} else {
2762		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2763	}
2764	if (abort_flag)
2765		return (2);
2766
2767	return (0);
2768}
2769
2770static void
2771sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2772    struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2773    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2774    int num_seg, int *ecn_seg_sums)
2775{
2776	/************************************************/
2777	/* process fragments and update sendqueue        */
2778	/************************************************/
2779	struct sctp_sack *sack;
2780	struct sctp_gap_ack_block *frag;
2781	struct sctp_tmit_chunk *tp1;
2782	int i;
2783	unsigned int j;
2784
2785#ifdef SCTP_FR_LOGGING
2786	int num_frs = 0;
2787
2788#endif
2789	uint16_t frag_strt, frag_end, primary_flag_set;
2790	u_long last_frag_high;
2791
2792	/*
2793	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
2794	 */
2795	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2796		primary_flag_set = 1;
2797	} else {
2798		primary_flag_set = 0;
2799	}
2800
2801	sack = &ch->sack;
2802	frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2803	    sizeof(struct sctp_sack));
2804	tp1 = NULL;
2805	last_frag_high = 0;
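	/*
	 * Gap ack block start/end values are 16-bit offsets relative to
	 * the cumulative TSN ack of the SACK (last_tsn here), so each
	 * block acks the run frag_strt + last_tsn .. frag_end + last_tsn.
	 */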
2806	for (i = 0; i < num_seg; i++) {
2807		frag_strt = ntohs(frag->start);
2808		frag_end = ntohs(frag->end);
2809		/* some sanity checks on the fragment offsets */
2810		if (frag_strt > frag_end) {
2811			/* this one is malformed, skip */
2812			frag++;
2813			continue;
2814		}
2815		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
2816		    MAX_TSN))
2817			*biggest_tsn_acked = frag_end + last_tsn;
2818
2819		/* mark acked dgs and find out the highest TSN being acked */
2820		if (tp1 == NULL) {
2821			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2822
2823			/* save the locations of the last frags */
2824			last_frag_high = frag_end + last_tsn;
2825		} else {
2826			/*
2827			 * now let's see if we need to reset the queue due to
2828			 * an out-of-order SACK fragment
2829			 */
2830			if (compare_with_wrap(frag_strt + last_tsn,
2831			    last_frag_high, MAX_TSN)) {
2832				/*
2833				 * if the new frag starts after the last TSN
2834				 * frag covered, we are ok and this one is
2835				 * beyond the last one
2836				 */
2837				;
2838			} else {
2839				/*
2840				 * ok, they have reset us, so we need to
2841				 * reset the queue; this will cause extra
2842				 * hunting, but hey, they chose the
2843				 * performance hit when they failed to order
2844				 * their gaps..
2845				 */
2846				tp1 = TAILQ_FIRST(&asoc->sent_queue);
2847			}
2848			last_frag_high = frag_end + last_tsn;
2849		}
2850		for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2851			while (tp1) {
2852#ifdef SCTP_FR_LOGGING
2853				if (tp1->rec.data.doing_fast_retransmit)
2854					num_frs++;
2855#endif
2856
2857				/*
2858				 * CMT: CUCv2 algorithm. For each TSN being
2859				 * processed from the sent queue, track the
2860				 * next expected pseudo-cumack, or
2861				 * rtx_pseudo_cumack, if required. Separate
2862				 * cumack trackers for first transmissions,
2863				 * and retransmissions.
2864				 */
2865				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2866				    (tp1->snd_count == 1)) {
2867					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2868					tp1->whoTo->find_pseudo_cumack = 0;
2869				}
2870				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2871				    (tp1->snd_count > 1)) {
2872					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2873					tp1->whoTo->find_rtx_pseudo_cumack = 0;
2874				}
2875				if (tp1->rec.data.TSN_seq == j) {
2876					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2877						/*
2878						 * must be held until
2879						 * cum-ack passes
2880						 */
2881						/*
2882						 * ECN Nonce: Add the nonce
2883						 * value to the sender's
2884						 * nonce sum
2885						 */
2886						if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2887							/*
2888							 * If it is less
2889							 * than ACKED, it is
2890							 * now no longer in
2891							 * flight. Higher
2892							 * values may
2893							 * already be set
2894							 * via previous Gap
2895							 * Ack Blocks...
2896							 * i.e. ACKED or
2897							 * MARKED.
2898							 */
2899							if (compare_with_wrap(tp1->rec.data.TSN_seq,
2900							    *biggest_newly_acked_tsn, MAX_TSN)) {
2901								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2902							}
2903							/*
2904							 * CMT: SFR algo
2905							 * (and HTNA) - set
2906							 * saw_newack to 1
2907							 * for dest being
2908							 * newly acked.
2909							 * update
2910							 * this_sack_highest_newack
2911							 * if appropriate.
2913							 */
2914							if (tp1->rec.data.chunk_was_revoked == 0)
2915								tp1->whoTo->saw_newack = 1;
2916
2917							if (compare_with_wrap(tp1->rec.data.TSN_seq,
2918							    tp1->whoTo->this_sack_highest_newack,
2919							    MAX_TSN)) {
2920								tp1->whoTo->this_sack_highest_newack =
2921								    tp1->rec.data.TSN_seq;
2922							}
2923							/*
2924							 * CMT DAC algo:
2925							 * also update
2926							 * this_sack_lowest_newack
2928							 */
2929							if (*this_sack_lowest_newack == 0) {
2930#ifdef SCTP_SACK_LOGGING
2931								sctp_log_sack(*this_sack_lowest_newack,
2932								    last_tsn,
2933								    tp1->rec.data.TSN_seq,
2934								    0,
2935								    0,
2936								    SCTP_LOG_TSN_ACKED);
2937#endif
2938								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2939							}
2940							/*
2941							 * CMT: CUCv2
2942							 * algorithm. If the
2943							 * (rtx-)pseudo-cumack
2944							 * for the corresp
2945							 * dest is being
2946							 * acked, then we
2947							 * have a new
2948							 * (rtx-)pseudo-cumack.
2949							 * Set
2950							 * new_(rtx_)pseudo_cumack
2951							 * to TRUE so
2952							 * that the cwnd for
2953							 * this dest can be
2954							 * updated. Also
2955							 * trigger search
2956							 * for the next
2957							 * expected
2958							 * (rtx-)pseudo-cumack.
2959							 * Separate
2960							 * pseudo_cumack
2961							 * trackers for
2962							 * first
2963							 * transmissions and
2964							 * retransmissions.
2965							 */
2966							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2967								if (tp1->rec.data.chunk_was_revoked == 0) {
2968									tp1->whoTo->new_pseudo_cumack = 1;
2969								}
2970								tp1->whoTo->find_pseudo_cumack = 1;
2971							}
2972#ifdef SCTP_CWND_LOGGING
2973							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2974#endif
2975							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2976								if (tp1->rec.data.chunk_was_revoked == 0) {
2977									tp1->whoTo->new_pseudo_cumack = 1;
2978								}
2979								tp1->whoTo->find_rtx_pseudo_cumack = 1;
2980							}
2981#ifdef SCTP_SACK_LOGGING
2982							sctp_log_sack(*biggest_newly_acked_tsn,
2983							    last_tsn,
2984							    tp1->rec.data.TSN_seq,
2985							    frag_strt,
2986							    frag_end,
2987							    SCTP_LOG_TSN_ACKED);
2988#endif
2989#ifdef SCTP_FLIGHT_LOGGING
2990							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
2991							    tp1->whoTo->flight_size,
2992							    tp1->book_size,
2993							    (uintptr_t) stcb,
2994							    tp1->rec.data.TSN_seq);
2995#endif
2996							if (tp1->whoTo->flight_size >= tp1->book_size)
2997								tp1->whoTo->flight_size -= tp1->book_size;
2998							else
2999								tp1->whoTo->flight_size = 0;
3000							if (asoc->total_flight >= tp1->book_size) {
3001								asoc->total_flight -= tp1->book_size;
3002								if (asoc->total_flight_count > 0)
3003									asoc->total_flight_count--;
3004							} else {
3005								asoc->total_flight = 0;
3006								asoc->total_flight_count = 0;
3007							}
3008
3009							tp1->whoTo->net_ack += tp1->send_size;
3010
3011							if (tp1->snd_count < 2) {
3012								/*
3013								 * True
3014								 * non-retransmitted
3015								 * chunk
3016								 */
3017								tp1->whoTo->net_ack2 += tp1->send_size;
3018
3019								/*
3020								 * update RTO
3021								 * too? */
3022								if (tp1->do_rtt) {
3023									tp1->whoTo->RTO =
3024									    sctp_calculate_rto(stcb,
3025									    asoc,
3026									    tp1->whoTo,
3027									    &tp1->sent_rcv_time);
3028									tp1->whoTo->rto_pending = 0;
3029									tp1->do_rtt = 0;
3030								}
3031							}
3032						}
3033						if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
3034						    tp1->sent != SCTP_DATAGRAM_UNSENT &&
3035						    compare_with_wrap(tp1->rec.data.TSN_seq,
3036						    asoc->this_sack_highest_gap,
3037						    MAX_TSN)) {
3038							asoc->this_sack_highest_gap =
3039							    tp1->rec.data.TSN_seq;
3040						}
3041						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3042							sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3043#ifdef SCTP_AUDITING_ENABLED
3044							sctp_audit_log(0xB2,
3045							    (asoc->sent_queue_retran_cnt & 0x000000ff));
3046#endif
3047
3048						}
3049						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3050						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3051
3052						tp1->sent = SCTP_DATAGRAM_MARKED;
3053					}
3054					break;
3055				}	/* if (tp1->TSN_seq == j) */
3056				if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
3057				    MAX_TSN))
3058					break;
3059
3060				tp1 = TAILQ_NEXT(tp1, sctp_next);
3061			}	/* end while (tp1) */
3062		}		/* end for (j = fragStart */
3063		frag++;		/* next one */
3064	}
3065#ifdef SCTP_FR_LOGGING
3066	/*
3067	 * if (num_frs) sctp_log_fr(*biggest_tsn_acked,
3068	 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3069	 */
3070#endif
3071}
3072
3073static void
3074sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack,
3075    u_long biggest_tsn_acked)
3076{
3077	struct sctp_tmit_chunk *tp1;
3078	int tot_revoked = 0;
3079
3080	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3081	while (tp1) {
3082		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3083		    MAX_TSN)) {
3084			/*
3085			 * ok this guy is either ACKED or MARKED. If it is
3086			 * ACKED it has been previously acked but not this
3087			 * time, i.e. revoked.  If it is MARKED it was ACK'ed
3088			 * again.
3089			 */
3090			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3091				/* it has been revoked */
3092				tp1->sent = SCTP_DATAGRAM_SENT;
3093				tp1->rec.data.chunk_was_revoked = 1;
3094				/*
3095				 * We must add this stuff back in to assure
3096				 * timers and such get started.
3097				 */
3098				tp1->whoTo->flight_size += tp1->book_size;
3099				asoc->total_flight_count++;
3100				asoc->total_flight += tp1->book_size;
3101				tot_revoked++;
3102#ifdef SCTP_SACK_LOGGING
3103				sctp_log_sack(asoc->last_acked_seq,
3104				    cumack,
3105				    tp1->rec.data.TSN_seq,
3106				    0,
3107				    0,
3108				    SCTP_LOG_TSN_REVOKED);
3109#endif
3110			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3111				/* it has been re-acked in this SACK */
3112				tp1->sent = SCTP_DATAGRAM_ACKED;
3113			}
3114		}
3115		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3116			break;
3117		tp1 = TAILQ_NEXT(tp1, sctp_next);
3118	}
3119	if (tot_revoked > 0) {
3120		/*
3121		 * Setup the ecn nonce re-sync point. We do this since once
3122		 * data is revoked we begin to retransmit things, which do
3123		 * NOT have the ECN bits set. This means we are now out of
3124		 * sync and must wait until we get back in sync with the
3125		 * peer to check ECN bits.
3126		 */
3127		tp1 = TAILQ_FIRST(&asoc->send_queue);
3128		if (tp1 == NULL) {
3129			asoc->nonce_resync_tsn = asoc->sending_seq;
3130		} else {
3131			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3132		}
3133		asoc->nonce_wait_for_ecne = 0;
3134		asoc->nonce_sum_check = 0;
3135	}
3136}
3137
3138extern int sctp_peer_chunk_oh;
3139
3140static void
3141sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3142    u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3143{
3144	struct sctp_tmit_chunk *tp1;
3145	int strike_flag = 0;
3146	struct timeval now;
3147	int tot_retrans = 0;
3148	uint32_t sending_seq;
3149	struct sctp_nets *net;
3150	int num_dests_sacked = 0;
3151
3152	/*
3153	 * select the sending_seq, this is either the next thing ready to be
3154	 * sent but not transmitted, OR, the next seq we assign.
3155	 */
3156	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3157	if (tp1 == NULL) {
3158		sending_seq = asoc->sending_seq;
3159	} else {
3160		sending_seq = tp1->rec.data.TSN_seq;
3161	}
3162
3163	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3164	if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3165		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3166			if (net->saw_newack)
3167				num_dests_sacked++;
3168		}
3169	}
3170	if (stcb->asoc.peer_supports_prsctp) {
3171		SCTP_GETTIME_TIMEVAL(&now);
3172	}
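	/*
	 * Walk the sent queue and "strike" chunks that the gap reports
	 * imply were lost: each strike bumps tp1->sent by one, and once
	 * it reaches SCTP_DATAGRAM_RESEND the chunk is marked for fast
	 * retransmit below.
	 */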
3173	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3174	while (tp1) {
3175		strike_flag = 0;
3176		if (tp1->no_fr_allowed) {
3177			/* this one had a timeout or something */
3178			tp1 = TAILQ_NEXT(tp1, sctp_next);
3179			continue;
3180		}
3181#ifdef SCTP_FR_LOGGING
3182		if (tp1->sent < SCTP_DATAGRAM_RESEND)
3183			sctp_log_fr(biggest_tsn_newly_acked,
3184			    tp1->rec.data.TSN_seq,
3185			    tp1->sent,
3186			    SCTP_FR_LOG_CHECK_STRIKE);
3187#endif
3188		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3189		    MAX_TSN) ||
3190		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3191			/* done */
3192			break;
3193		}
3194		if (stcb->asoc.peer_supports_prsctp) {
3195			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3196				/* Is it expired? */
3197				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3198					/* Yes so drop it */
3199					if (tp1->data != NULL) {
3200						sctp_release_pr_sctp_chunk(stcb, tp1,
3201						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3202						    &asoc->sent_queue);
3203					}
3204					tp1 = TAILQ_NEXT(tp1, sctp_next);
3205					continue;
3206				}
3207			}
3208			if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3209				/* Has it been retransmitted tv_sec times? */
3210				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3211					/* Yes, so drop it */
3212					if (tp1->data != NULL) {
3213						sctp_release_pr_sctp_chunk(stcb, tp1,
3214						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3215						    &asoc->sent_queue);
3216					}
3217					tp1 = TAILQ_NEXT(tp1, sctp_next);
3218					continue;
3219				}
3220			}
3221		}
3222		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3223		    asoc->this_sack_highest_gap, MAX_TSN)) {
3224			/* we are beyond the tsn in the sack  */
3225			break;
3226		}
3227		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3228			/* either a RESEND, ACKED, or MARKED */
3229			/* skip */
3230			tp1 = TAILQ_NEXT(tp1, sctp_next);
3231			continue;
3232		}
3233		/*
3234		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3235		 */
3236		if (tp1->whoTo->saw_newack == 0) {
3237			/*
3238			 * No new acks were received for data sent to this
3239			 * dest. Therefore, according to the SFR algo for
3240			 * CMT, no data sent to this dest can be marked for
3241			 * FR using this SACK. (iyengar@cis.udel.edu,
3242			 * 2005/05/12)
3243			 */
3244			tp1 = TAILQ_NEXT(tp1, sctp_next);
3245			continue;
3246		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3247		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3248			/*
3249			 * CMT: New acks were received for data sent to
3250			 * this dest. But no new acks were seen for data
3251			 * sent after tp1. Therefore, according to the SFR
3252			 * algo for CMT, tp1 cannot be marked for FR using
3253			 * this SACK. This step covers part of the DAC algo
3254			 * and the HTNA algo as well.
3255			 */
3256			tp1 = TAILQ_NEXT(tp1, sctp_next);
3257			continue;
3258		}
3259		/*
3260		 * Here we check to see if we have already done a FR
3261		 * and if so we see if the biggest TSN we saw in the sack is
3262		 * smaller than the recovery point. If so we don't strike
3263		 * the tsn... otherwise we CAN strike the TSN.
3264		 */
3265		/*
3266		 * @@@ JRI: Check for CMT
3267		 */
3268		if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3269			/*
3270			 * Strike the TSN if in fast-recovery and cum-ack
3271			 * moved.
3272			 */
3273#ifdef SCTP_FR_LOGGING
3274			sctp_log_fr(biggest_tsn_newly_acked,
3275			    tp1->rec.data.TSN_seq,
3276			    tp1->sent,
3277			    SCTP_FR_LOG_STRIKE_CHUNK);
3278#endif
3279			tp1->sent++;
3280			if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3281				/*
3282				 * CMT DAC algorithm: If SACK flag is set to
3283				 * 0, then lowest_newack test will not pass
3284				 * because it would have been set to the
3285				 * cumack earlier. If not already marked to be
3286				 * rtx'd, if not a mixed sack, and if tp1 is
3287				 * not between two sacked TSNs, then mark it by
3288				 * one more.
3289				 */
3290				if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3291				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3292#ifdef SCTP_FR_LOGGING
3293					sctp_log_fr(16 + num_dests_sacked,
3294					    tp1->rec.data.TSN_seq,
3295					    tp1->sent,
3296					    SCTP_FR_LOG_STRIKE_CHUNK);
3297#endif
3298					tp1->sent++;
3299				}
3300			}
3301		} else if (tp1->rec.data.doing_fast_retransmit) {
3302			/*
3303			 * For those that have done a FR we must take
3304			 * special consideration if we strike. I.e the
3305			 * biggest_newly_acked must be higher than the
3306			 * sending_seq at the time we did the FR.
3307			 */
3308#ifdef SCTP_FR_TO_ALTERNATE
3309			/*
3310			 * If FR's go to new networks, then we must only do
3311			 * this for singly homed asoc's. However if the FR's
3312			 * go to the same network (Armando's work) then it's
3313			 * ok to FR multiple times.
3314			 */
3315			if (asoc->numnets < 2)
3316#else
3317			if (1)
3318#endif
3319			{
3320				if ((compare_with_wrap(biggest_tsn_newly_acked,
3321				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3322				    (biggest_tsn_newly_acked ==
3323				    tp1->rec.data.fast_retran_tsn)) {
3324					/*
3325					 * Strike the TSN, since this ack is
3326					 * beyond where things were when we
3327					 * did a FR.
3328					 */
3329#ifdef SCTP_FR_LOGGING
3330					sctp_log_fr(biggest_tsn_newly_acked,
3331					    tp1->rec.data.TSN_seq,
3332					    tp1->sent,
3333					    SCTP_FR_LOG_STRIKE_CHUNK);
3334#endif
3335					tp1->sent++;
3336					strike_flag = 1;
3337					if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3338						/*
3339						 * CMT DAC algorithm: If
3340						 * SACK flag is set to 0,
3341						 * then lowest_newack test
3342						 * will not pass because it
3343						 * would have been set to
3344						 * the cumack earlier. If
3345						 * not already marked to be rtx'd,
3346						 * if not a mixed sack, and
3347						 * if tp1 is not between two
3348						 * sacked TSNs, then mark it by
3349						 * one more.
3350						 */
3351						if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3352						    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3353#ifdef SCTP_FR_LOGGING
3354							sctp_log_fr(32 + num_dests_sacked,
3355							    tp1->rec.data.TSN_seq,
3356							    tp1->sent,
3357							    SCTP_FR_LOG_STRIKE_CHUNK);
3358#endif
3359							tp1->sent++;
3360						}
3361					}
3362				}
3363			}
3364			/*
3365			 * @@@ JRI: TODO: remove code for HTNA algo. CMT's
3366			 * SFR algo covers HTNA.
3367			 */
3368		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3369		    biggest_tsn_newly_acked, MAX_TSN)) {
3370			/*
3371			 * We don't strike these: this is the HTNA
3372			 * algorithm, i.e., we don't strike if our TSN is
3373			 * larger than the Highest TSN Newly Acked.
3374			 */
3375			;
3376		} else {
3377			/* Strike the TSN */
3378#ifdef SCTP_FR_LOGGING
3379			sctp_log_fr(biggest_tsn_newly_acked,
3380			    tp1->rec.data.TSN_seq,
3381			    tp1->sent,
3382			    SCTP_FR_LOG_STRIKE_CHUNK);
3383#endif
3384			tp1->sent++;
3385			if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3386				/*
3387				 * CMT DAC algorithm: If the SACK's DAC flag
3388				 * is 0, then the lowest_newack test will not
3389				 * pass, because it would have been set to the
3390				 * cumack earlier. If the chunk is not already
3391				 * marked for retransmission, this is not a
3392				 * mixed SACK, and tp1 is not between two
3393				 * SACKed TSNs, then mark it one more time.
3394				 */
3395				if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3396				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3397#ifdef SCTP_FR_LOGGING
3398					sctp_log_fr(48 + num_dests_sacked,
3399					    tp1->rec.data.TSN_seq,
3400					    tp1->sent,
3401					    SCTP_FR_LOG_STRIKE_CHUNK);
3402#endif
3403					tp1->sent++;
3404				}
3405			}
3406		}
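		/*
		 * Note on the marking above: tp1->sent doubles as a strike
		 * counter. Each qualifying SACK bumps it by one (or by two
		 * under CMT DAC), and once it climbs from SCTP_DATAGRAM_SENT
		 * up to SCTP_DATAGRAM_RESEND -- three strikes, given the
		 * spacing of the SCTP_DATAGRAM_* constants -- the chunk is
		 * committed to fast retransmission below.
		 */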
3407		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3408			/* Increment the count to resend */
3409			struct sctp_nets *alt;
3410
3411			/* printf("OK, we are now ready to FR this guy\n"); */
3412#ifdef SCTP_FR_LOGGING
3413			sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3414			    0, SCTP_FR_MARKED);
3415#endif
3416			if (strike_flag) {
3417				/* This is a subsequent FR */
3418				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3419			}
3420			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3421
3422			if (sctp_cmt_on_off) {
3423				/*
3424				 * CMT: Using RTX_SSTHRESH policy for CMT.
3425				 * If CMT is being used, then pick dest with
3426				 * largest ssthresh for any retransmission.
3427				 * (iyengar@cis.udel.edu, 2005/08/12)
3428				 */
3429				tp1->no_fr_allowed = 1;
3430				alt = tp1->whoTo;
3431				alt = sctp_find_alternate_net(stcb, alt, 1);
3432				/*
3433				 * CUCv2: If a different dest is picked for
3434				 * the retransmission, then new
3435				 * (rtx-)pseudo_cumack needs to be tracked
3436				 * for orig dest. Let CUCv2 track new (rtx-)
3437				 * pseudo-cumack always.
3438				 */
3439				tp1->whoTo->find_pseudo_cumack = 1;
3440				tp1->whoTo->find_rtx_pseudo_cumack = 1;
3441
3442
3443			} else {/* CMT is OFF */
3444
3445#ifdef SCTP_FR_TO_ALTERNATE
3446				/* Can we find an alternate? */
3447				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3448#else
3449				/*
3450				 * default behavior is to NOT retransmit
3451				 * FR's to an alternate. Armando Caro's
3452				 * paper details why.
3453				 */
3454				alt = tp1->whoTo;
3455#endif
3456			}
3457
3458			tp1->rec.data.doing_fast_retransmit = 1;
3459			tot_retrans++;
3460			/* mark the sending seq for possible subsequent FR's */
3461			/*
3462			 * printf("Marking TSN for FR new value %x\n",
3463			 * (uint32_t)tp1->rec.data.TSN_seq);
3464			 */
3465			if (TAILQ_EMPTY(&asoc->send_queue)) {
3466				/*
3467				 * If the send queue is empty, then
3468				 * sending_seq is the next sequence number
3469				 * that will be assigned, marking the point
3470				 * just past the last one we sent.
3471				 */
3472				tp1->rec.data.fast_retran_tsn = sending_seq;
3473			} else {
3474				/*
3475				 * If there are chunks on the send queue
3476				 * (unsent data that has made it from the
3477				 * stream queues but not out the door), we
3478				 * take the first one (which will have the
3479				 * lowest TSN) as the mark just past the
3480				 * last one we sent.
3481				 */
3482				struct sctp_tmit_chunk *ttt;
3483
3484				ttt = TAILQ_FIRST(&asoc->send_queue);
3485				tp1->rec.data.fast_retran_tsn =
3486				    ttt->rec.data.TSN_seq;
3487			}
3488
3489			if (tp1->do_rtt) {
3490				/*
3491				 * this guy had an RTO calculation pending
3492				 * on it; cancel it
3493				 */
3494				tp1->whoTo->rto_pending = 0;
3495				tp1->do_rtt = 0;
3496			}
3497			/* fix counts and things */
3498#ifdef SCTP_FLIGHT_LOGGING
3499			sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
3500			    tp1->whoTo->flight_size,
3501			    tp1->book_size,
3502			    (uintptr_t) stcb,
3503			    tp1->rec.data.TSN_seq);
3504#endif
3505			tp1->whoTo->net_ack++;
3506			if (tp1->whoTo->flight_size >= tp1->book_size)
3507				tp1->whoTo->flight_size -= tp1->book_size;
3508			else
3509				tp1->whoTo->flight_size = 0;
3510
3511#ifdef SCTP_LOG_RWND
3512			sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3513			    asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
3514#endif
3515			/* add back to the rwnd */
3516			asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3517
3518			/* remove from the total flight */
3519			if (asoc->total_flight >= tp1->book_size) {
3520				asoc->total_flight -= tp1->book_size;
3521				if (asoc->total_flight_count > 0)
3522					asoc->total_flight_count--;
3523			} else {
3524				asoc->total_flight = 0;
3525				asoc->total_flight_count = 0;
3526			}
3527
3528
3529			if (alt != tp1->whoTo) {
3530				/* yes, there is an alternate. */
3531				sctp_free_remote_addr(tp1->whoTo);
3532				tp1->whoTo = alt;
3533				atomic_add_int(&alt->ref_count, 1);
3534			}
3535		}
3536		tp1 = TAILQ_NEXT(tp1, sctp_next);
3537	}			/* while (tp1) */
3538
3539	if (tot_retrans > 0) {
3540		/*
3541		 * Set up the ECN nonce re-sync point. We do this because once
3542		 * we fast-retransmit something, we introduce a Karn's rule
3543		 * scenario and can no longer know the totals for the ECN bits.
3544		 */
3545		asoc->nonce_resync_tsn = sending_seq;
3546		asoc->nonce_wait_for_ecne = 0;
3547		asoc->nonce_sum_check = 0;
3548	}
3549}
3550
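/*
 * PR-SCTP (RFC 3758): try to move the Advanced.Peer.Ack.Point forward
 * over chunks at the head of the sent queue that have been abandoned
 * (marked SCTP_FORWARD_TSN_SKIP) or whose PR-SCTP lifetime has expired.
 * Returns the last chunk the ack point advanced over, or NULL if it could
 * not move at all. Illustrative walk, hypothetical states (TSN:state):
 * 10:SKIP 11:SKIP 12:RESEND,ttl-expired (dropped, then skipped)
 * 13:RESEND,ttl-alive -- the ack point advances over 10-12 and the loop
 * stops at 13.
 */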
3551struct sctp_tmit_chunk *
3552sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3553    struct sctp_association *asoc)
3554{
3555	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3556	struct timeval now;
3557	int now_filled = 0;
3558
3559	if (asoc->peer_supports_prsctp == 0) {
3560		return (NULL);
3561	}
3562	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3563	while (tp1) {
3564		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3565		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3566			/* no chance to advance, out of here */
3567			break;
3568		}
3569		if (!PR_SCTP_ENABLED(tp1->flags)) {
3570			/*
3571			 * We can't fwd-tsn past any chunk that is reliable,
3572			 * i.e. one that is retransmitted until the asoc fails.
3573			 */
3574			break;
3575		}
3576		if (!now_filled) {
3577			SCTP_GETTIME_TIMEVAL(&now);
3578			now_filled = 1;
3579		}
3580		tp2 = TAILQ_NEXT(tp1, sctp_next);
3581		/*
3582		 * Now we have a chunk which is marked for another
3583		 * retransmission on a PR-stream, but it may already have run
3584		 * out of chances, OR it has been marked to be skipped. Can
3585		 * we skip it if it is a resend?
3586		 */
3587		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3588		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3589			/*
3590			 * Now is this one marked for resend and its time is
3591			 * now up?
3592			 */
3593			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3594				/* Yes so drop it */
3595				if (tp1->data) {
3596					sctp_release_pr_sctp_chunk(stcb, tp1,
3597					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3598					    &asoc->sent_queue);
3599				}
3600			} else {
3601				/*
3602				 * No, we are done when hit one for resend
3603				 * No, we are done when we hit one marked
3604				 * for resend whose time has not expired.
3605				break;
3606			}
3607		}
3608		/*
3609		 * OK, now if this chunk is marked to be dropped, we can clean
3610		 * up the chunk, advance our peer ack point, and check the
3611		 * next chunk.
3612		 */
3613		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3614			/* the advanced PeerAckPoint moves forward */
3615			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3616			a_adv = tp1;
3617			/*
3618			 * we don't want to de-queue it here. Just wait for
3619			 * the next peer SACK to come with a new cumTSN and
3620			 * then the chunk will be dropped in the normal
3621			 * fashion.
3622			 */
3623			if (tp1->data) {
3624				sctp_free_bufspace(stcb, asoc, tp1, 1);
3625				/*
3626				 * Maybe there should be another
3627				 * notification type
3628				 */
3629				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3630				    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3631				    tp1);
3632				sctp_m_freem(tp1->data);
3633				tp1->data = NULL;
3634				if (stcb->sctp_socket) {
3635					sctp_sowwakeup(stcb->sctp_ep,
3636					    stcb->sctp_socket);
3637#ifdef SCTP_WAKE_LOGGING
3638					sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
3639#endif
3640				}
3641			}
3642		} else {
3643			/*
3644			 * If it is still in RESEND we can advance no
3645			 * further
3646			 */
3647			break;
3648		}
3649		/*
3650		 * If we hit here we just dumped tp1, move to next tsn on
3651		 * sent queue.
3652		 */
3653		tp1 = tp2;
3654	}
3655	return (a_adv);
3656}
3657
3658#ifdef SCTP_HIGH_SPEED
3659struct sctp_hs_raise_drop {
3660	int32_t cwnd;
3661	int32_t increase;
3662	int32_t drop_percent;
3663};
3664
3665#define SCTP_HS_TABLE_SIZE 73
3666
3667struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3668	{38, 1, 50},		/* 0   */
3669	{118, 2, 44},		/* 1   */
3670	{221, 3, 41},		/* 2   */
3671	{347, 4, 38},		/* 3   */
3672	{495, 5, 37},		/* 4   */
3673	{663, 6, 35},		/* 5   */
3674	{851, 7, 34},		/* 6   */
3675	{1058, 8, 33},		/* 7   */
3676	{1284, 9, 32},		/* 8   */
3677	{1529, 10, 31},		/* 9   */
3678	{1793, 11, 30},		/* 10  */
3679	{2076, 12, 29},		/* 11  */
3680	{2378, 13, 28},		/* 12  */
3681	{2699, 14, 28},		/* 13  */
3682	{3039, 15, 27},		/* 14  */
3683	{3399, 16, 27},		/* 15  */
3684	{3778, 17, 26},		/* 16  */
3685	{4177, 18, 26},		/* 17  */
3686	{4596, 19, 25},		/* 18  */
3687	{5036, 20, 25},		/* 19  */
3688	{5497, 21, 24},		/* 20  */
3689	{5979, 22, 24},		/* 21  */
3690	{6483, 23, 23},		/* 22  */
3691	{7009, 24, 23},		/* 23  */
3692	{7558, 25, 22},		/* 24  */
3693	{8130, 26, 22},		/* 25  */
3694	{8726, 27, 22},		/* 26  */
3695	{9346, 28, 21},		/* 27  */
3696	{9991, 29, 21},		/* 28  */
3697	{10661, 30, 21},	/* 29  */
3698	{11358, 31, 20},	/* 30  */
3699	{12082, 32, 20},	/* 31  */
3700	{12834, 33, 20},	/* 32  */
3701	{13614, 34, 19},	/* 33  */
3702	{14424, 35, 19},	/* 34  */
3703	{15265, 36, 19},	/* 35  */
3704	{16137, 37, 19},	/* 36  */
3705	{17042, 38, 18},	/* 37  */
3706	{17981, 39, 18},	/* 38  */
3707	{18955, 40, 18},	/* 39  */
3708	{19965, 41, 17},	/* 40  */
3709	{21013, 42, 17},	/* 41  */
3710	{22101, 43, 17},	/* 42  */
3711	{23230, 44, 17},	/* 43  */
3712	{24402, 45, 16},	/* 44  */
3713	{25618, 46, 16},	/* 45  */
3714	{26881, 47, 16},	/* 46  */
3715	{28193, 48, 16},	/* 47  */
3716	{29557, 49, 15},	/* 48  */
3717	{30975, 50, 15},	/* 49  */
3718	{32450, 51, 15},	/* 50  */
3719	{33986, 52, 15},	/* 51  */
3720	{35586, 53, 14},	/* 52  */
3721	{37253, 54, 14},	/* 53  */
3722	{38992, 55, 14},	/* 54  */
3723	{40808, 56, 14},	/* 55  */
3724	{42707, 57, 13},	/* 56  */
3725	{44694, 58, 13},	/* 57  */
3726	{46776, 59, 13},	/* 58  */
3727	{48961, 60, 13},	/* 59  */
3728	{51258, 61, 13},	/* 60  */
3729	{53677, 62, 12},	/* 61  */
3730	{56230, 63, 12},	/* 62  */
3731	{58932, 64, 12},	/* 63  */
3732	{61799, 65, 12},	/* 64  */
3733	{64851, 66, 11},	/* 65  */
3734	{68113, 67, 11},	/* 66  */
3735	{71617, 68, 11},	/* 67  */
3736	{75401, 69, 10},	/* 68  */
3737	{79517, 70, 10},	/* 69  */
3738	{84035, 71, 10},	/* 70  */
3739	{89053, 72, 10},	/* 71  */
3740	{94717, 73, 9}		/* 72  */
3741};
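
/*
 * How to read the table above (HighSpeed congestion control in the style
 * of RFC 3649): each row is { cwnd threshold in KB, additive increase in
 * KB, multiplicative drop percentage }, and lookups compare the current
 * cwnd in KB (net->cwnd >> 10) against the thresholds. Worked example
 * with illustrative numbers: cwnd = 2 MB gives cur_val = 2048; the first
 * row with a larger threshold is {2076, 12, 29}, so an increase event
 * adds 12 KB (12 << 10 bytes) to cwnd and a loss event sheds 29% of cwnd.
 */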
3742
3743static void
3744sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
3745{
3746	int cur_val, i, indx, incr;
3747
3748	cur_val = net->cwnd >> 10;
3749	indx = SCTP_HS_TABLE_SIZE - 1;
3750
3751	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3752		/* normal mode */
3753		if (net->net_ack > net->mtu) {
3754			net->cwnd += net->mtu;
3755#ifdef SCTP_CWND_MONITOR
3756			sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3757#endif
3758		} else {
3759			net->cwnd += net->net_ack;
3760#ifdef SCTP_CWND_MONITOR
3761			sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3762#endif
3763		}
3764	} else {
3765		for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
3766			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3767				indx = i;
3768				break;
3769			}
3770		}
3771		net->last_hs_used = indx;
3772		incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3773		net->cwnd += incr;
3774#ifdef SCTP_CWND_MONITOR
3775		sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
3776#endif
3777	}
3778}
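
#if 0
/*
 * Minimal stand-alone sketch (never compiled) of the table walk performed
 * by sctp_hs_cwnd_increase() above: given a cwnd in bytes, return the
 * HighSpeed additive-increase step in bytes, or 0 while the cwnd is still
 * below the HS region and the normal slow-start/ABC rules apply.
 */
static int32_t
sctp_hs_increase_for(int32_t cwnd)
{
	int i;
	int cur_val = cwnd >> 10;	/* table thresholds are in KB */

	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* below the HS region; caller uses the normal rules */
		return (0);
	}
	for (i = 0; i < SCTP_HS_TABLE_SIZE; i++) {
		if (cur_val < sctp_cwnd_adjust[i].cwnd) {
			/* first row whose threshold exceeds our cwnd */
			return (sctp_cwnd_adjust[i].increase << 10);
		}
	}
	/* off the top of the table: use the last row */
	return (sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE - 1].increase << 10);
}
#endif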
3779
3780static void
3781sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
3782{
3783	int cur_val, i, indx;
3784
3785#ifdef SCTP_CWND_MONITOR
3786	int old_cwnd = net->cwnd;
3787
3788#endif
3789
3790	cur_val = net->cwnd >> 10;
3791	indx = net->last_hs_used;
3792	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3793		/* normal mode */
3794		net->ssthresh = net->cwnd / 2;
3795		if (net->ssthresh < (net->mtu * 2)) {
3796			net->ssthresh = 2 * net->mtu;
3797		}
3798		net->cwnd = net->ssthresh;
3799	} else {
3800		/* drop by the proper amount */
3801		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3802		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3803		net->cwnd = net->ssthresh;
3804		/* now where are we */
3805		indx = net->last_hs_used;
3806		cur_val = net->cwnd >> 10;
3807		/* reset where we are in the table */
3808		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3809			/* feel out of hs */
3810			/* fell out of the HS region */
3811		} else {
3812			for (i = indx; i >= 1; i--) {
3813				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3814					break;
3815				}
3816			}
3817			net->last_hs_used = indx;
3818		}
3819	}
3820#ifdef SCTP_CWND_MONITOR
3821	sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
3822#endif
3823
3824}
3825
3826#endif
3827
3828extern int sctp_early_fr;
3829extern int sctp_L2_abc_variable;
3830
3831
3832static __inline void
3833sctp_cwnd_update(struct sctp_tcb *stcb,
3834    struct sctp_association *asoc,
3835    int accum_moved, int reneged_all, int will_exit)
3836{
3837	struct sctp_nets *net;
3838
3839	/******************************/
3840	/* update cwnd and Early FR   */
3841	/******************************/
3842	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3843#ifdef JANA_CODE_WHY_THIS
3844		/*
3845		 * CMT fast recovery code. Need to debug.
3846		 */
3847		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
3848			if (compare_with_wrap(asoc->last_acked_seq,
3849			    net->fast_recovery_tsn, MAX_TSN) ||
3850			    (asoc->last_acked_seq == net->fast_recovery_tsn) ||
3851			    compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
3852			    (net->pseudo_cumack == net->fast_recovery_tsn)) {
3853				net->will_exit_fast_recovery = 1;
3854			}
3855		}
3856#endif
3857		if (sctp_early_fr) {
3858			/*
3859			 * So, first of all, do we need to have an Early FR
3860			 * timer running?
3861			 */
3862			if (((TAILQ_FIRST(&asoc->sent_queue)) &&
3863			    (net->ref_count > 1) &&
3864			    (net->flight_size < net->cwnd)) ||
3865			    (reneged_all)) {
3866				/*
3867				 * yes, so in this case stop it if it's
3868				 * running, and then restart it. Reneging
3869				 * all is a special case where we want to
3870				 * run the Early FR timer and then force the
3871				 * last few unacked to be sent, causing us
3872				 * to elicit a sack with gaps to force out
3873				 * the others.
3874				 */
3875				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
3876					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
3877					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3878					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
3879				}
3880				SCTP_STAT_INCR(sctps_earlyfrstrid);
3881				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
3882			} else {
3883				/* No, stop it if it's running */
3884				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
3885					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
3886					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3887					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
3888				}
3889			}
3890		}
3891		/* if nothing was acked on this destination skip it */
3892		if (net->net_ack == 0) {
3893#ifdef SCTP_CWND_LOGGING
3894			sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
3895#endif
3896			continue;
3897		}
3898		if (net->net_ack2 > 0) {
3899			/*
3900			 * Karn's rule applies to clearing the error count;
3901			 * this is optional.
3902			 */
3903			net->error_count = 0;
3904			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
3905			    SCTP_ADDR_NOT_REACHABLE) {
3906				/* addr came good */
3907				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3908				net->dest_state |= SCTP_ADDR_REACHABLE;
3909				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3910				    SCTP_RECEIVED_SACK, (void *)net);
3911				/* now was it the primary? if so restore */
3912				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3913					sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3914				}
3915			}
3916		}
3917#ifdef JANA_CODE_WHY_THIS
3918		/*
3919		 * Cannot skip for CMT. Need to come back and check these
3920		 * variables for CMT. CMT fast recovery code. Need to debug.
3921		 */
3922		if (sctp_cmt_on_off == 1 &&
3923		    net->fast_retran_loss_recovery &&
3924		    net->will_exit_fast_recovery == 0)
3925#endif
3926			if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
3927				/*
3928				 * If we are in loss recovery we skip any
3929				 * cwnd update
3930				 */
3931				goto skip_cwnd_update;
3932			}
3933		/*
3934		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
3935		 * moved.
3936		 */
3937		if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
3938			/* If the cumulative ack moved we can proceed */
3939			if (net->cwnd <= net->ssthresh) {
3940				/* We are in slow start */
3941				if (net->flight_size + net->net_ack >=
3942				    net->cwnd) {
3943#ifdef SCTP_HIGH_SPEED
3944					sctp_hs_cwnd_increase(stcb, net);
3945#else
3946					if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
3947						net->cwnd += (net->mtu * sctp_L2_abc_variable);
3948#ifdef SCTP_CWND_MONITOR
3949						sctp_log_cwnd(stcb, net, net->mtu,
3950						    SCTP_CWND_LOG_FROM_SS);
3951#endif
3952
3953					} else {
3954						net->cwnd += net->net_ack;
3955#ifdef SCTP_CWND_MONITOR
3956						sctp_log_cwnd(stcb, net, net->net_ack,
3957						    SCTP_CWND_LOG_FROM_SS);
3958#endif
3959
3960					}
3961#endif
3962				} else {
3963					unsigned int dif;
3964
3965					dif = net->cwnd - (net->flight_size +
3966					    net->net_ack);
3967#ifdef SCTP_CWND_LOGGING
3968					sctp_log_cwnd(stcb, net, net->net_ack,
3969					    SCTP_CWND_LOG_NOADV_SS);
3970#endif
3971				}
3972			} else {
3973				/* We are in congestion avoidance */
3974				if (net->flight_size + net->net_ack >=
3975				    net->cwnd) {
3976					/*
3977					 * add to pba only if we had a
3978					 * cwnd's worth (or so) in flight OR
3979					 * the burst limit was applied.
3980					 */
3981					net->partial_bytes_acked +=
3982					    net->net_ack;
3983
3984					/*
3985					 * Do we need to increase (if pba is
3986					 * > cwnd)?
3987					 */
3988					if (net->partial_bytes_acked >=
3989					    net->cwnd) {
3990						if (net->cwnd <
3991						    net->partial_bytes_acked) {
3992							net->partial_bytes_acked -=
3993							    net->cwnd;
3994						} else {
3995							net->partial_bytes_acked =
3996							    0;
3997						}
3998						net->cwnd += net->mtu;
3999#ifdef SCTP_CWND_MONITOR
4000						sctp_log_cwnd(stcb, net, net->mtu,
4001						    SCTP_CWND_LOG_FROM_CA);
4002#endif
4003					}
4004#ifdef SCTP_CWND_LOGGING
4005					else {
4006						sctp_log_cwnd(stcb, net, net->net_ack,
4007						    SCTP_CWND_LOG_NOADV_CA);
4008					}
4009#endif
4010				} else {
4011					unsigned int dif;
4012
4013#ifdef SCTP_CWND_LOGGING
4014					sctp_log_cwnd(stcb, net, net->net_ack,
4015					    SCTP_CWND_LOG_NOADV_CA);
4016#endif
4017					dif = net->cwnd - (net->flight_size +
4018					    net->net_ack);
4019				}
4020			}
4021		} else {
4022#ifdef SCTP_CWND_LOGGING
4023			sctp_log_cwnd(stcb, net, net->mtu,
4024			    SCTP_CWND_LOG_NO_CUMACK);
4025#endif
4026		}
4027skip_cwnd_update:
4028		/*
4029		 * NOW, according to Karn's rule, do we need to restore the
4030		 * RTO timer? Check our net_ack2. If it is not set, then we
4031		 * have an ambiguity, i.e. all the data ack'd was sent to
4032		 * more than one place.
4033		 */
4034		if (net->net_ack2) {
4035			/* restore any doubled timers */
4036			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4037			if (net->RTO < stcb->asoc.minrto) {
4038				net->RTO = stcb->asoc.minrto;
4039			}
4040			if (net->RTO > stcb->asoc.maxrto) {
4041				net->RTO = stcb->asoc.maxrto;
4042			}
4043		}
4044	}
4045}
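
/*
 * Worked example of the update above (illustrative numbers, assuming
 * sctp_L2_abc_variable = 1 and an MTU of 1500): in slow start with
 * cwnd = 3000, a SACK clearing net_ack = 2800 bytes while the window was
 * full grows cwnd by min(net_ack, L * MTU) = 1500, to 4500 (RFC 3465
 * style ABC). In congestion avoidance with cwnd = 12000,
 * partial_bytes_acked accumulates each net_ack until it reaches the
 * cwnd; only then is cwnd raised by one MTU to 13500 and pba reduced by
 * the old cwnd.
 */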
4046
4047
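/*
 * Express SACK handling: a fast path for a SACK that advances only the
 * cumulative ack point and carries no gap-ack blocks or duplicate-TSN
 * reports, so none of the gap/strike machinery of the full
 * sctp_handle_sack() path below is needed. It frees the newly cum-acked
 * chunks, updates congestion control and rwnd state, and takes care of
 * the shutdown bookkeeping.
 */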
4048void
4049sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4050    uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4051{
4052	struct sctp_nets *net;
4053	struct sctp_association *asoc;
4054	struct sctp_tmit_chunk *tp1, *tp2;
4055	int j;
4056
4057	SCTP_TCB_LOCK_ASSERT(stcb);
4058	asoc = &stcb->asoc;
4059	/* First setup for CC stuff */
4060	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4061		net->prev_cwnd = net->cwnd;
4062		net->net_ack = 0;
4063		net->net_ack2 = 0;
4064	}
4065	if (sctp_strict_sacks) {
4066		uint32_t send_s;
4067
4068		if (TAILQ_EMPTY(&asoc->send_queue)) {
4069			send_s = asoc->sending_seq;
4070		} else {
4071			tp1 = TAILQ_FIRST(&asoc->send_queue);
4072			send_s = tp1->rec.data.TSN_seq;
4073		}
4074		if ((cumack == send_s) ||
4075		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
4076#ifdef INVARIANTS		/* for testing only */
4077			panic("Impossible sack 1");
4078#else
4079			struct mbuf *oper;
4080
4081			*abort_now = 1;
4082			/* XXX */
4083			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4084			    0, M_DONTWAIT, 1, MT_DATA);
4085			if (oper) {
4086				struct sctp_paramhdr *ph;
4087				uint32_t *ippp;
4088
4089				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4090				    sizeof(uint32_t);
4091				ph = mtod(oper, struct sctp_paramhdr *);
4092				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4093				ph->param_length = htons(SCTP_BUF_LEN(oper));
4094				ippp = (uint32_t *) (ph + 1);
4095				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4096			}
4097			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4098			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
4099			return;
4100#endif
4101		}
4102	}
4103	asoc->this_sack_highest_gap = cumack;
4104	stcb->asoc.overall_error_count = 0;
4105	/* process the new consecutive TSN first */
4106	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4107	while (tp1) {
4108		tp2 = TAILQ_NEXT(tp1, sctp_next);
4109		if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4110		    MAX_TSN) ||
4111		    cumack == tp1->rec.data.TSN_seq) {
4112			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4113				/*
4114				 * ECN Nonce: Add the nonce to the sender's
4115				 * nonce sum
4116				 */
4117				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4118				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4119					/*
4120					 * If it is less than ACKED, it is
4121					 * now no longer in flight. Higher
4122					 * values may occur during marking.
4123					 */
4124#ifdef SCTP_FLIGHT_LOGGING
4125					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
4126					    tp1->whoTo->flight_size,
4127					    tp1->book_size,
4128					    (uintptr_t) stcb,
4129					    tp1->rec.data.TSN_seq);
4130#endif
4131
4132					if (tp1->whoTo->flight_size >= tp1->book_size) {
4133						tp1->whoTo->flight_size -= tp1->book_size;
4134					} else {
4135						tp1->whoTo->flight_size = 0;
4136					}
4137					if (asoc->total_flight >= tp1->book_size) {
4138						asoc->total_flight -= tp1->book_size;
4139						if (asoc->total_flight_count > 0)
4140							asoc->total_flight_count--;
4141					} else {
4142						asoc->total_flight = 0;
4143						asoc->total_flight_count = 0;
4144					}
4145					tp1->whoTo->net_ack += tp1->send_size;
4146					if (tp1->snd_count < 2) {
4147						/*
4148						 * True non-retransmitted
4149						 * chunk
4150						 */
4151						tp1->whoTo->net_ack2 +=
4152						    tp1->send_size;
4153
4154						/* update RTO too? */
4155						if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) {
4156							tp1->whoTo->RTO =
4157							    sctp_calculate_rto(stcb,
4158							    asoc, tp1->whoTo,
4159							    &tp1->sent_rcv_time);
4160							tp1->whoTo->rto_pending = 0;
4161							tp1->do_rtt = 0;
4162						}
4163					}
4164#ifdef SCTP_CWND_LOGGING
4165					sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4166#endif
4167				}
4168				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4169					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4170				}
4171				tp1->sent = SCTP_DATAGRAM_ACKED;
4172			}
4173		} else {
4174			break;
4175		}
4176		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4177		if (tp1->data) {
4178			sctp_free_bufspace(stcb, asoc, tp1, 1);
4179			sctp_m_freem(tp1->data);
4180		}
4181#ifdef SCTP_SACK_LOGGING
4182		sctp_log_sack(asoc->last_acked_seq,
4183		    cumack,
4184		    tp1->rec.data.TSN_seq,
4185		    0,
4186		    0,
4187		    SCTP_LOG_FREE_SENT);
4188#endif
4189		tp1->data = NULL;
4190		asoc->sent_queue_cnt--;
4191		sctp_free_remote_addr(tp1->whoTo);
4192		sctp_free_a_chunk(stcb, tp1);
4193		tp1 = tp2;
4194	}
4195	if (stcb->sctp_socket) {
4196		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4197#ifdef SCTP_WAKE_LOGGING
4198		sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4199#endif
4200		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4201#ifdef SCTP_WAKE_LOGGING
4202	} else {
4203		sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4204#endif
4205	}
4206
4207	if (asoc->last_acked_seq != cumack)
4208		sctp_cwnd_update(stcb, asoc, 1, 0, 0);
4209	asoc->last_acked_seq = cumack;
4210	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4211		/* nothing left in-flight */
4212		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4213			net->flight_size = 0;
4214			net->partial_bytes_acked = 0;
4215		}
4216		asoc->total_flight = 0;
4217		asoc->total_flight_count = 0;
4218	}
4219	/* Fix up the a-p-a-p for future PR-SCTP sends */
4220	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4221		asoc->advanced_peer_ack_point = cumack;
4222	}
4223	/* ECN Nonce updates */
4224	if (asoc->ecn_nonce_allowed) {
4225		if (asoc->nonce_sum_check) {
4226			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4227				if (asoc->nonce_wait_for_ecne == 0) {
4228					struct sctp_tmit_chunk *lchk;
4229
4230					lchk = TAILQ_FIRST(&asoc->send_queue);
4231					asoc->nonce_wait_for_ecne = 1;
4232					if (lchk) {
4233						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4234					} else {
4235						asoc->nonce_wait_tsn = asoc->sending_seq;
4236					}
4237				} else {
4238					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4239					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4240						/*
4241						 * Misbehaving peer. We need
4242						 * to react to this guy
4243						 */
4244						asoc->ecn_allowed = 0;
4245						asoc->ecn_nonce_allowed = 0;
4246					}
4247				}
4248			}
4249		} else {
4250			/* See if Resynchronization Possible */
4251			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4252				asoc->nonce_sum_check = 1;
4253				/*
4254				 * Now we must calculate the new base. We
4255				 * know the nonce totals for the segments
4256				 * gap-acked in this SACK (none here), and
4257				 * we know the SACK's nonce sum; it's in
4258				 * nonce_sum_flag. So we back-calculate
4259				 * asoc->nonce_sum_expect_base as the XOR:
4260				 *
4261				 * SACK-flag-Value   Seg-Sums   Base
4262				 *        0             0        0
4263				 *        1             0        1
4264				 *        0             1        1
4265				 *        1             1        0
4266				 */
4267				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4268			}
4269		}
4270	}
4271	/* RWND update */
4272	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4273	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4274	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4275		/* SWS sender side engages */
4276		asoc->peers_rwnd = 0;
4277	}
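	/*
	 * Worked example of the rwnd bookkeeping above (illustrative
	 * numbers): if the peer advertised rwnd = 64000 while we have
	 * total_flight = 20000 and 10 chunks on the sent queue with
	 * sctp_peer_chunk_oh = 256, peers_rwnd becomes
	 * 64000 - (20000 + 10 * 256) = 41440. Whenever the result falls
	 * below the SWS threshold it is clamped to 0, so we never dribble
	 * out silly-window-sized sends.
	 */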
4278	/* Now assure a timer is running wherever data is queued */
4279again:
4280	j = 0;
4281	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4282		if (net->flight_size) {
4283			int to_ticks;
4284
4285			if (net->RTO == 0) {
4286				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4287			} else {
4288				to_ticks = MSEC_TO_TICKS(net->RTO);
4289			}
4290			j++;
4291			SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4292			    sctp_timeout_handler, &net->rxt_timer);
4293		} else {
4294			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4295				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4296				    stcb, net,
4297				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4298			}
4299			if (sctp_early_fr) {
4300				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4301					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4302					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4303					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4304				}
4305			}
4306		}
4307	}
4308	if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
4309		/* huh, this should not happen */
4310#ifdef INVARIANTS
4311		panic("Flight size incorrect? fixing??");
4312#else
4313		printf("Flight size incorrect? fixing\n");
4314		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4315			net->flight_size = 0;
4316		}
4317		asoc->total_flight = 0;
4318		asoc->total_flight_count = 0;
4319		asoc->sent_queue_retran_cnt = 0;
4320		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4321			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4322				tp1->whoTo->flight_size += tp1->book_size;
4323				asoc->total_flight += tp1->book_size;
4324				asoc->total_flight_count++;
4325			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4326				asoc->sent_queue_retran_cnt++;
4327			}
4328		}
4329#endif
4330		goto again;
4331	}
4332	/**********************************/
4333	/* Now what about shutdown issues */
4334	/**********************************/
4335	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4336		/* nothing left on sendqueue.. consider done */
4337		/* clean up */
4338		if ((asoc->stream_queue_cnt == 1) &&
4339		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4340		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4341		    (asoc->locked_on_sending)
4342		    ) {
4343			struct sctp_stream_queue_pending *sp;
4344
4345			/*
4346			 * I may be in a state where we got all across.. but
4347			 * cannot write more due to a shutdown... we abort
4348			 * since the user did not indicate EOR in this case.
4349			 * The sp will be cleaned during free of the asoc.
4350			 */
4351			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4352			    sctp_streamhead);
4353			if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
4354				asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4355				asoc->locked_on_sending = NULL;
4356				asoc->stream_queue_cnt--;
4357			}
4358		}
4359		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4360		    (asoc->stream_queue_cnt == 0)) {
4361			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4362				/* Need to abort here */
4363				struct mbuf *oper;
4364
4365		abort_out_now:
4366				*abort_now = 1;
4367				/* XXX */
4368				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4369				    0, M_DONTWAIT, 1, MT_DATA);
4370				if (oper) {
4371					struct sctp_paramhdr *ph;
4372					uint32_t *ippp;
4373
4374					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4375					    sizeof(uint32_t);
4376					ph = mtod(oper, struct sctp_paramhdr *);
4377					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4378					ph->param_length = htons(SCTP_BUF_LEN(oper));
4379					ippp = (uint32_t *) (ph + 1);
4380					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4381				}
4382				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4383				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
4384			} else {
4385				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4386				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4387				sctp_stop_timers_for_shutdown(stcb);
4388				sctp_send_shutdown(stcb,
4389				    stcb->asoc.primary_destination);
4390				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4391				    stcb->sctp_ep, stcb, asoc->primary_destination);
4392				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4393				    stcb->sctp_ep, stcb, asoc->primary_destination);
4394			}
4395		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4396		    (asoc->stream_queue_cnt == 0)) {
4397			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4398				goto abort_out_now;
4399			}
4400			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4401			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4402			sctp_send_shutdown_ack(stcb,
4403			    stcb->asoc.primary_destination);
4404
4405			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4406			    stcb->sctp_ep, stcb, asoc->primary_destination);
4407		}
4408	}
4409#ifdef SCTP_SACK_RWND_LOGGING
4410	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4411	    rwnd,
4412	    stcb->asoc.peers_rwnd,
4413	    stcb->asoc.total_flight,
4414	    stcb->asoc.total_output_queue_size);
4415
4416#endif
4417}
4418
4419
4420
4421void
4422sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4423    struct sctp_nets *net_from, int *abort_now)
4424{
4425	struct sctp_association *asoc;
4426	struct sctp_sack *sack;
4427	struct sctp_tmit_chunk *tp1, *tp2;
4428	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4429	         this_sack_lowest_newack;
4430	uint16_t num_seg, num_dup;
4431	uint16_t wake_him = 0;
4432	unsigned int sack_length;
4433	uint32_t send_s;
4434	long j;
4435	int accum_moved = 0;
4436	int will_exit_fast_recovery = 0;
4437	uint32_t a_rwnd;
4438	struct sctp_nets *net = NULL;
4439	int nonce_sum_flag, ecn_seg_sums = 0;
4440	uint8_t reneged_all = 0;
4441	uint8_t cmt_dac_flag;
4442
4443	/*
4444	 * we take any chance we can to service our queues since we cannot
4445	 * get awoken when the socket is read from :<
4446	 */
4447	/*
4448	 * Now perform the actual SACK handling:
4449	 * 1) Verify that it is not an old sack; if so, discard.
4450	 * 2) If there is nothing left in the send queue (cum-ack is equal
4451	 *    to last acked), then you have a duplicate too; update any rwnd
4452	 *    change and verify no timers are running, then return.
4453	 * 3) Process any new consecutive data, i.e. the cum-ack moved;
4454	 *    process these first and note that it moved.
4455	 * 4) Process any sack blocks.
4456	 * 5) Drop any acked chunks from the queue.
4457	 * 6) Check for any revoked blocks and mark them.
4458	 * 7) Update the cwnd.
4459	 * 8) If nothing is left, sync up flightsizes and things, stop all
4460	 *    timers, and also check for shutdown_pending state. If so, go
4461	 *    ahead and send off the shutdown. If in shutdown recv, send off
4462	 *    the shutdown-ack and start that timer; return.
4463	 * 9) Strike any non-acked things and do the FR procedure if
4464	 *    needed, being sure to set the FR flag.
4465	 * 10) Do pr-sctp procedures.
4466	 * 11) Apply any FR penalties.
4467	 * 12) Assure we will SACK if in shutdown_recv state.
4468	 */
4464	SCTP_TCB_LOCK_ASSERT(stcb);
4465	sack = &ch->sack;
4466	/* CMT DAC algo */
4467	this_sack_lowest_newack = 0;
4468	j = 0;
4469	sack_length = ntohs(ch->ch.chunk_length);
4470	if (sack_length < sizeof(struct sctp_sack_chunk)) {
4471#ifdef SCTP_DEBUG
4472		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4473			printf("Bad size on sack chunk .. too small\n");
4474		}
4475#endif
4476		return;
4477	}
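	/*
	 * For reference, the SACK chunk parsed below is laid out as in
	 * RFC 2960, section 3.3.4:
	 *
	 *	+----------------+----------------+----------------------+
	 *	| Type = 3       | Chunk Flags    | Chunk Length         |
	 *	+----------------+----------------+----------------------+
	 *	| Cumulative TSN Ack                                     |
	 *	| Advertised Receiver Window Credit (a_rwnd)             |
	 *	| Number of Gap Ack Blocks = N | Number of Dup TSNs = X  |
	 *	| Gap Ack Block #1 Start       | Gap Ack Block #1 End    |
	 *	| ...                                                    |
	 *	| Duplicate TSN 1 ... Duplicate TSN X                    |
	 *	+--------------------------------------------------------+
	 */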
4478	/* ECN Nonce */
4479	SCTP_STAT_INCR(sctps_slowpath_sack);
4480	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4481	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4482	num_seg = ntohs(sack->num_gap_ack_blks);
4483	a_rwnd = (uint32_t) ntohl(sack->a_rwnd);
4484
4485	/* CMT DAC algo */
4486	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4487	num_dup = ntohs(sack->num_dup_tsns);
4488
4489
4490	stcb->asoc.overall_error_count = 0;
4491	asoc = &stcb->asoc;
4492#ifdef SCTP_SACK_LOGGING
4493	sctp_log_sack(asoc->last_acked_seq,
4494	    cum_ack,
4495	    0,
4496	    num_seg,
4497	    num_dup,
4498	    SCTP_LOG_NEW_SACK);
4499#endif
4500#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
4501	if (num_dup) {
4502		int off_to_dup, iii;
4503		uint32_t *dupdata;
4504
4505		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4506		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4507			dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup);
4508			for (iii = 0; iii < num_dup; iii++) {
4509				sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4510				dupdata++;
4511
4512			}
4513		} else {
4514			printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4515			    off_to_dup, num_dup, sack_length, num_seg);
4516		}
4517	}
4518#endif
4519	/* reality check */
4520	if (TAILQ_EMPTY(&asoc->send_queue)) {
4521		send_s = asoc->sending_seq;
4522	} else {
4523		tp1 = TAILQ_FIRST(&asoc->send_queue);
4524		send_s = tp1->rec.data.TSN_seq;
4525	}
4526
4527	if (sctp_strict_sacks) {
4528		if (cum_ack == send_s ||
4529		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4530#ifdef INVARIANTS		/* for testing only */
4531	hopeless_peer:
4532			panic("Impossible sack 1");
4533#else
4534			struct mbuf *oper;
4535
4536			/*
4537			 * no way, we have not even sent this TSN out yet.
4538			 * Peer is hopelessly messed up with us.
4539			 */
4540	hopeless_peer:
4541			*abort_now = 1;
4542			/* XXX */
4543			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4544			    0, M_DONTWAIT, 1, MT_DATA);
4545			if (oper) {
4546				struct sctp_paramhdr *ph;
4547				uint32_t *ippp;
4548
4549				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4550				    sizeof(uint32_t);
4551				ph = mtod(oper, struct sctp_paramhdr *);
4552				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4553				ph->param_length = htons(SCTP_BUF_LEN(oper));
4554				ippp = (uint32_t *) (ph + 1);
4555				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4556			}
4557			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4558			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
4559			return;
4560#endif
4561		}
4562	}
4563	/**********************/
4564	/* 1) check the range */
4565	/**********************/
4566	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4567		/* acking something behind */
4568		return;
4569	}
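	/*
	 * Note: compare_with_wrap(a, b, MAX_TSN) is the wrap-safe
	 * "a is newer than b" test over the 32-bit TSN space (serial
	 * number arithmetic in the style of RFC 1982). For example, TSN
	 * 0x5 compares as newer than TSN 0xfffffff0 because the sequence
	 * space has wrapped between them.
	 */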
4570	/* update the Rwnd of the peer */
4571	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4572	    TAILQ_EMPTY(&asoc->send_queue) &&
4573	    (asoc->stream_queue_cnt == 0)
4574	    ) {
4575		/* nothing left on send/sent and strmq */
4576#ifdef SCTP_LOG_RWND
4577		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4578		    asoc->peers_rwnd, 0, 0, a_rwnd);
4579#endif
4580		asoc->peers_rwnd = a_rwnd;
4581		if (asoc->sent_queue_retran_cnt) {
4582			asoc->sent_queue_retran_cnt = 0;
4583		}
4584		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4585			/* SWS sender side engages */
4586			asoc->peers_rwnd = 0;
4587		}
4588		/* stop any timers */
4589		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4590			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4591			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4592			if (sctp_early_fr) {
4593				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4594					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4595					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4596					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4597				}
4598			}
4599			net->partial_bytes_acked = 0;
4600			net->flight_size = 0;
4601		}
4602		asoc->total_flight = 0;
4603		asoc->total_flight_count = 0;
4604		return;
4605	}
4606	/*
4607	 * We init net_ack and net_ack2 to 0. These are used to track two
4608	 * things: the total byte count acked is tracked in net_ack, AND
4609	 * net_ack2 is used to track the total bytes acked that are
4610	 * unambiguous and were never retransmitted. We track these on a
4611	 * per-destination-address basis.
4612	 */
4613	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4614		net->prev_cwnd = net->cwnd;
4615		net->net_ack = 0;
4616		net->net_ack2 = 0;
4617
4618		/*
4619		 * CMT: Reset CUC algo variable before SACK processing
4620		 */
4621		net->new_pseudo_cumack = 0;
4622		net->will_exit_fast_recovery = 0;
4623	}
4624	/* process the new consecutive TSN first */
4625	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4626	while (tp1) {
4627		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4628		    MAX_TSN) ||
4629		    last_tsn == tp1->rec.data.TSN_seq) {
4630			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4631				/*
4632				 * ECN Nonce: Add the nonce to the sender's
4633				 * nonce sum
4634				 */
4635				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4636				accum_moved = 1;
4637				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4638					/*
4639					 * If it is less than ACKED, it is
4640					 * now no-longer in flight. Higher
4641					 * values may occur during marking
4642					 */
4643					if ((tp1->whoTo->dest_state &
4644					    SCTP_ADDR_UNCONFIRMED) &&
4645					    (tp1->snd_count < 2)) {
4646						/*
4647						 * If there was no retran
4648						 * and the address is
4649						 * unconfirmed, and we sent
4650						 * there and are now being
4651						 * sacked, it's confirmed;
4652						 * mark it so.
4653						 */
4654						tp1->whoTo->dest_state &=
4655						    ~SCTP_ADDR_UNCONFIRMED;
4656					}
4657#ifdef SCTP_FLIGHT_LOGGING
4658					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
4659					    tp1->whoTo->flight_size,
4660					    tp1->book_size,
4661					    (uintptr_t) stcb,
4662					    tp1->rec.data.TSN_seq);
4663#endif
4664					if (tp1->whoTo->flight_size >= tp1->book_size) {
4665						tp1->whoTo->flight_size -= tp1->book_size;
4666					} else {
4667						tp1->whoTo->flight_size = 0;
4668					}
4669					if (asoc->total_flight >= tp1->book_size) {
4670						asoc->total_flight -= tp1->book_size;
4671						if (asoc->total_flight_count > 0)
4672							asoc->total_flight_count--;
4673					} else {
4674						asoc->total_flight = 0;
4675						asoc->total_flight_count = 0;
4676					}
4677					tp1->whoTo->net_ack += tp1->send_size;
4678
4679					/* CMT SFR and DAC algos */
4680					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4681					tp1->whoTo->saw_newack = 1;
4682
4683					if (tp1->snd_count < 2) {
4684						/*
4685						 * True non-retransmitted
4686						 * chunk
4687						 */
4688						tp1->whoTo->net_ack2 +=
4689						    tp1->send_size;
4690
4691						/* update RTO too? */
4692						if (tp1->do_rtt) {
4693							tp1->whoTo->RTO =
4694							    sctp_calculate_rto(stcb,
4695							    asoc, tp1->whoTo,
4696							    &tp1->sent_rcv_time);
4697							tp1->whoTo->rto_pending = 0;
4698							tp1->do_rtt = 0;
4699						}
4700					}
4701					/*
4702					 * CMT: CUCv2 algorithm. From the
4703					 * cumack'd TSNs, for each TSN being
4704					 * acked for the first time, set the
4705					 * following variables for the
4706					 * corresponding destination.
4707					 * new_pseudo_cumack will trigger a
4708					 * cwnd update.
4709					 * find_(rtx_)pseudo_cumack will
4710					 * trigger search for the next
4711					 * expected (rtx-)pseudo-cumack.
4712					 */
4713					tp1->whoTo->new_pseudo_cumack = 1;
4714					tp1->whoTo->find_pseudo_cumack = 1;
4715					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4716
4717
4718#ifdef SCTP_SACK_LOGGING
4719					sctp_log_sack(asoc->last_acked_seq,
4720					    cum_ack,
4721					    tp1->rec.data.TSN_seq,
4722					    0,
4723					    0,
4724					    SCTP_LOG_TSN_ACKED);
4725#endif
4726#ifdef SCTP_CWND_LOGGING
4727					sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4728#endif
4729				}
4730				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4731					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4732#ifdef SCTP_AUDITING_ENABLED
4733					sctp_audit_log(0xB3,
4734					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4735#endif
4736				}
4737				tp1->sent = SCTP_DATAGRAM_ACKED;
4738			}
4739		} else {
4740			break;
4741		}
4742		tp1 = TAILQ_NEXT(tp1, sctp_next);
4743	}
4744	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4745	/* always set this up to cum-ack */
4746	asoc->this_sack_highest_gap = last_tsn;
4747
4748	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
4749
4750		/* skip corrupt segments */
4751		goto skip_segments;
4752	}
4753	if (num_seg > 0) {
4754
4755		/*
4756		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4757		 * to be greater than the cumack. Also reset saw_newack to 0
4758		 * for all dests.
4759		 */
4760		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4761			net->saw_newack = 0;
4762			net->this_sack_highest_newack = last_tsn;
4763		}
4764
4765		/*
4766		 * this_sack_highest_gap will increase while handling NEW
4767		 * segments; this_sack_highest_newack will increase while
4768		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4769		 * used for the CMT DAC algo. saw_newack will also change.
4770		 */
4771		sctp_handle_segments(stcb, asoc, ch, last_tsn,
4772		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4773		    num_seg, &ecn_seg_sums);
4774
4775		if (sctp_strict_sacks) {
4776			/*
4777			 * validate the biggest_tsn_acked in the gap acks if
4778			 * strict adherence is wanted.
4779			 */
4780			if ((biggest_tsn_acked == send_s) ||
4781			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4782				/*
4783				 * peer is either confused or we are under
4784				 * attack. We must abort.
4785				 */
4786				goto hopeless_peer;
4787			}
4788		}
4789	}
4790skip_segments:
4791	/*******************************************/
4792	/* cancel ALL T3-send timer if accum moved */
4793	/*******************************************/
4794	if (sctp_cmt_on_off) {
4795		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796			if (net->new_pseudo_cumack)
4797				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4798				    stcb, net,
4799				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4800
4801		}
4802	} else {
4803		if (accum_moved) {
4804			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4805				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4806				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4807			}
4808		}
4809	}
4810	/********************************************/
4811	/* drop the acked chunks from the sendqueue */
4812	/********************************************/
4813	asoc->last_acked_seq = cum_ack;
4814
4815	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4816	if (tp1 == NULL)
4817		goto done_with_it;
4818	do {
4819		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4820		    MAX_TSN)) {
4821			break;
4822		}
4823		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4824			/* no more sent on list */
4825			break;
4826		}
4827		tp2 = TAILQ_NEXT(tp1, sctp_next);
4828		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4829		/*
4830		 * Friendlier printf in lieu of panic now that I think it's
4831		 * fixed.
4832		 */
4833
4834		if (tp1->pr_sctp_on) {
4835			if (asoc->pr_sctp_cnt != 0)
4836				asoc->pr_sctp_cnt--;
4837		}
4838		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4839		    (asoc->total_flight > 0)) {
4840			printf("Warning: flight size incorrect, should be 0 but is %d\n",
4841			    asoc->total_flight);
4842			asoc->total_flight = 0;
4843		}
4844		if (tp1->data) {
4845			sctp_free_bufspace(stcb, asoc, tp1, 1);
4846			sctp_m_freem(tp1->data);
4847			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4848				asoc->sent_queue_cnt_removeable--;
4849			}
4850		}
4851#ifdef SCTP_SACK_LOGGING
4852		sctp_log_sack(asoc->last_acked_seq,
4853		    cum_ack,
4854		    tp1->rec.data.TSN_seq,
4855		    0,
4856		    0,
4857		    SCTP_LOG_FREE_SENT);
4858#endif
4859		tp1->data = NULL;
4860		asoc->sent_queue_cnt--;
4861		sctp_free_remote_addr(tp1->whoTo);
4862
4863		sctp_free_a_chunk(stcb, tp1);
4864		wake_him++;
4865		tp1 = tp2;
4866	} while (tp1 != NULL);
4867
4868done_with_it:
4869	if ((wake_him) && (stcb->sctp_socket)) {
4870		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4871#ifdef SCTP_WAKE_LOGGING
4872		sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4873#endif
4874		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4875#ifdef SCTP_WAKE_LOGGING
4876	} else {
4877		sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4878#endif
4879	}
4880
4881	if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) {
4882		if (compare_with_wrap(asoc->last_acked_seq,
4883		    asoc->fast_recovery_tsn, MAX_TSN) ||
4884		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4885			/* Setup so we will exit RFC2582 fast recovery */
4886			will_exit_fast_recovery = 1;
4887		}
4888	}
4889	/*
4890	 * Check for revoked fragments:
4891	 *
4892	 * If the previous sack had no frags, then we can't have any revoked.
4893	 * If the previous sack had frags: if we now have frags (num_seg > 0),
4894	 *   call sctp_check_for_revoked() to tell if the peer revoked some of
4895	 *   them; else the peer revoked all ACKED fragments, since we had
4896	 *   some before and now we have NONE.
4897	 */
4898
4899	if (sctp_cmt_on_off) {
4900		/*
4901		 * Don't check for revoked if CMT is ON. CMT causes
4902		 * reordering of data and acks (received on different
4903		 * interfaces) can be persistently reordered. Acking
4904		 * followed by apparent revoking and re-acking causes
4905		 * unexpected weird behavior. So, at this time, CMT does not
4906		 * respect renegs. Renegs will have to be recovered through
4907		 * a timeout. Not a big deal for such a rare event.
4908		 */
4909	} else if (num_seg)
4910		sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
4911	else if (asoc->saw_sack_with_frags) {
4912		int cnt_revoked = 0;
4913
4914		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4915		if (tp1 != NULL) {
4916			/* Peer revoked all dg's marked or acked */
4917			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4918				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
4919				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
4920					tp1->sent = SCTP_DATAGRAM_SENT;
4921					tp1->rec.data.chunk_was_revoked = 1;
4922					tp1->whoTo->flight_size += tp1->book_size;
4923					asoc->total_flight_count++;
4924					asoc->total_flight += tp1->book_size;
4925					cnt_revoked++;
4926				}
4927			}
4928			if (cnt_revoked) {
4929				reneged_all = 1;
4930			}
4931		}
4932		asoc->saw_sack_with_frags = 0;
4933	}
4934	if (num_seg)
4935		asoc->saw_sack_with_frags = 1;
4936	else
4937		asoc->saw_sack_with_frags = 0;
4938
4939
4940	sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4941
4942	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4943		/* nothing left in-flight */
4944		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4945			/* stop all timers */
4946			if (sctp_early_fr) {
4947				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4948					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4949					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4950					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4951				}
4952			}
4953			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4954			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4955			net->flight_size = 0;
4956			net->partial_bytes_acked = 0;
4957		}
4958		asoc->total_flight = 0;
4959		asoc->total_flight_count = 0;
4960	}
4961	/**********************************/
4962	/* Now what about shutdown issues */
4963	/**********************************/
4964	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4965		/* nothing left on sendqueue.. consider done */
4966#ifdef SCTP_LOG_RWND
4967		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4968		    asoc->peers_rwnd, 0, 0, a_rwnd);
4969#endif
4970		asoc->peers_rwnd = a_rwnd;
4971		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4972			/* SWS sender side engages */
4973			asoc->peers_rwnd = 0;
4974		}
4975		/* clean up */
4976		if ((asoc->stream_queue_cnt == 1) &&
4977		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4978		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4979		    (asoc->locked_on_sending)
4980		    ) {
4981			struct sctp_stream_queue_pending *sp;
4982
4983			/*
4984			 * I may be in a state where we got all across.. but
4985			 * cannot write more due to a shutdown... we abort
4986			 * since the user did not indicate EOR in this case.
4987			 */
4988			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4989			    sctp_streamhead);
4990			if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
4991				asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4992				asoc->locked_on_sending = NULL;
4993				asoc->stream_queue_cnt--;
4994			}
4995		}
4996		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4997		    (asoc->stream_queue_cnt == 0)) {
4998			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4999				/* Need to abort here */
5000				struct mbuf *oper;
5001
5002		abort_out_now:
5003				*abort_now = 1;
5004				/* XXX */
5005				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5006				    0, M_DONTWAIT, 1, MT_DATA);
5007				if (oper) {
5008					struct sctp_paramhdr *ph;
5009					uint32_t *ippp;
5010
5011					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5012					    sizeof(uint32_t);
5013					ph = mtod(oper, struct sctp_paramhdr *);
5014					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5015					ph->param_length = htons(SCTP_BUF_LEN(oper));
5016					ippp = (uint32_t *) (ph + 1);
5017					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5018				}
5019				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5020				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
5021				return;
5022			} else {
5023				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
5024				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5025				sctp_stop_timers_for_shutdown(stcb);
5026				sctp_send_shutdown(stcb,
5027				    stcb->asoc.primary_destination);
5028				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5029				    stcb->sctp_ep, stcb, asoc->primary_destination);
5030				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5031				    stcb->sctp_ep, stcb, asoc->primary_destination);
5032			}
5033			return;
5034		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5035		    (asoc->stream_queue_cnt == 0)) {
5036			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5037				goto abort_out_now;
5038			}
5039			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
5040			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5041			sctp_send_shutdown_ack(stcb,
5042			    stcb->asoc.primary_destination);
5043
5044			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5045			    stcb->sctp_ep, stcb, asoc->primary_destination);
5046			return;
5047		}
5048	}
5049	/*
5050	 * Now here we are going to recycle net_ack for a different use...
5051	 * HEADS UP.
5052	 */
5053	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5054		net->net_ack = 0;
5055	}
5056
5057	/*
5058	 * CMT DAC algorithm: If the SACK's DAC flag was 0, then there is
5059	 * no extra marking to be done. Setting this_sack_lowest_newack to
5060	 * the cum_ack will automatically ensure that.
5061	 */
5062	if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
5063		this_sack_lowest_newack = cum_ack;
5064	}
5065	if (num_seg > 0) {
5066		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5067		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5068	}
5069	/*********************************************/
5070	/* Here we perform PR-SCTP procedures        */
5071	/* (section 4.2)                             */
5072	/*********************************************/
5073	/* C1. update advancedPeerAckPoint */
5074	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5075		asoc->advanced_peer_ack_point = cum_ack;
5076	}
5077	/* C2. try to further move advancedPeerAckPoint ahead */
5078
5079	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5080		struct sctp_tmit_chunk *lchk;
5081
5082		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5083		/* C3. See if we need to send a Fwd-TSN */
5084		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5085		    MAX_TSN)) {
5086			/*
5087			 * ISSUE with ECN, see FWD-TSN processing for notes
5088			 * on issues that will occur when the ECN NONCE
5089			 * stuff is put into SCTP for cross checking.
5090			 */
5091			send_forward_tsn(stcb, asoc);
5092
5093			/*
5094			 * ECN Nonce: Disable Nonce Sum check when FWD TSN
5095			 * is sent and store resync tsn
5096			 */
5097			asoc->nonce_sum_check = 0;
5098			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5099			if (lchk) {
5100				/* Ensure a timer is running */
5101				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5102				    stcb->sctp_ep, stcb, lchk->whoTo);
5103			}
5104		}
5105	}
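	/*
	 * Worked PR-SCTP example (hypothetical values): if cum_ack = 1000
	 * and TSNs 1001-1003 were abandoned PR-SCTP chunks (e.g. their
	 * lifetime expired), C2 advances advanced_peer_ack_point to 1003
	 * and C3 sends FWD-TSN(1003) so the peer skips the missing data.
	 */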
5106	/*
5107	 * CMT fast recovery code. Still needs debugging; the intended per-net
5108	 * test is ((sctp_cmt_on_off == 1) && (net->fast_retran_loss_recovery == 0)).
5109	 */
5110	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5111		if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
5112			/* out of a RFC2582 Fast recovery window? */
5113			if (net->net_ack > 0) {
5114				/*
5115				 * per section 7.2.3, check whether any
5116				 * destinations had a fast retransmit
5117				 * sent to them. If so, we need to
5118				 * adjust ssthresh and cwnd.
5119				 */
5120				struct sctp_tmit_chunk *lchk;
5121
5122#ifdef  SCTP_HIGH_SPEED
5123				sctp_hs_cwnd_decrease(stcb, net);
5124#else
5125#ifdef SCTP_CWND_MONITOR
5126				int old_cwnd = net->cwnd;
5127
5128#endif
5129				net->ssthresh = net->cwnd / 2;
5130				if (net->ssthresh < (net->mtu * 2)) {
5131					net->ssthresh = 2 * net->mtu;
5132				}
5133				net->cwnd = net->ssthresh;
5134#ifdef SCTP_CWND_MONITOR
5135				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
5136				    SCTP_CWND_LOG_FROM_FR);
5137#endif
5138#endif
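				/*
				 * Worked example of the (non-HIGH_SPEED)
				 * halving above, hypothetical values: with
				 * cwnd = 20000 and mtu = 1500, ssthresh and
				 * cwnd become 10000; with cwnd = 2500 the
				 * half (1250) is under 2 * mtu, so both are
				 * floored at 3000.
				 */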
5139
5140				lchk = TAILQ_FIRST(&asoc->send_queue);
5141
5142				net->partial_bytes_acked = 0;
5143				/* Turn on fast recovery window */
5144				asoc->fast_retran_loss_recovery = 1;
5145				if (lchk == NULL) {
5146					/* Mark end of the window */
5147					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
5148				} else {
5149					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
5150				}
5151
5152				/*
5153				 * CMT fast recovery -- per destination
5154				 * recovery variable.
5155				 */
5156				net->fast_retran_loss_recovery = 1;
5157
5158				if (lchk == NULL) {
5159					/* Mark end of the window */
5160					net->fast_recovery_tsn = asoc->sending_seq - 1;
5161				} else {
5162					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
5163				}
5164
5167				/*
5168				 * Disable Nonce Sum Checking and store the
5169				 * resync tsn
5170				 */
5171				asoc->nonce_sum_check = 0;
5172				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
5173
5174				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
5175				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5176				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5177				    stcb->sctp_ep, stcb, net);
5178			}
5179		} else if (net->net_ack > 0) {
5180			/*
5181			 * Mark a peg that we WOULD have done a cwnd
5182			 * reduction but RFC2582 prevented this action.
5183			 */
5184			SCTP_STAT_INCR(sctps_fastretransinrtt);
5185		}
5186	}
5187
5189	/******************************************************************
5190	 *  Here we do the stuff with ECN Nonce checking.
5191	 *  We basically check to see if the nonce sum flag was incorrect
5192	 *  or if resynchronization needs to be done. Also if we catch a
5193	 *  misbehaving receiver we give him the kick.
5194	 ******************************************************************/
5195
5196	if (asoc->ecn_nonce_allowed) {
5197		if (asoc->nonce_sum_check) {
5198			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5199				if (asoc->nonce_wait_for_ecne == 0) {
5200					struct sctp_tmit_chunk *lchk;
5201
5202					lchk = TAILQ_FIRST(&asoc->send_queue);
5203					asoc->nonce_wait_for_ecne = 1;
5204					if (lchk) {
5205						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5206					} else {
5207						asoc->nonce_wait_tsn = asoc->sending_seq;
5208					}
5209				} else {
5210					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5211					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5212						/*
5213						 * Misbehaving peer. We need
5214						 * to react to this guy
5215						 */
5216						asoc->ecn_allowed = 0;
5217						asoc->ecn_nonce_allowed = 0;
5218					}
5219				}
5220			}
5221		} else {
5222			/* See if Resynchronization Possible */
5223			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5224				asoc->nonce_sum_check = 1;
5225				/*
5226				 * now we must calculate what the base is.
5227				 * We do this based on two things: we know
5228				 * the total for all the segments
5229				 * gap-acked in the SACK (it is stored in
5230				 * ecn_seg_sums), and we know the SACK's
5231				 * nonce sum (it is in nonce_sum_flag). So we
5232				 * can build a truth table to back-calculate
5233				 * the new value of
5234				 * asoc->nonce_sum_expect_base:
5235				 *
5236				 * flag=0,sums=0 -> base=0   flag=0,sums=1 -> base=1
5237				 * flag=1,sums=0 -> base=1   flag=1,sums=1 -> base=0
5238				 * i.e. base = (sums XOR flag)
5239				 */
5240				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5241			}
5242		}
5243	}
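	/*
	 * Illustration of the nonce base back-calculation above
	 * (hypothetical values): a SACK carrying nonce_sum_flag = 1 whose
	 * gap-acked segments sum to ecn_seg_sums = 0 (mod 2) yields a new
	 * expect base of (0 ^ 1) & SCTP_SACK_NONCE_SUM = 1.
	 */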
5244	/* Now, are we exiting loss recovery? */
5245	if (will_exit_fast_recovery) {
5246		/* Ok, we must exit fast recovery */
5247		asoc->fast_retran_loss_recovery = 0;
5248	}
5249	if ((asoc->sat_t3_loss_recovery) &&
5250	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5251	    MAX_TSN) ||
5252	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5253		/* end satellite t3 loss recovery */
5254		asoc->sat_t3_loss_recovery = 0;
5255	}
5256	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5257		if (net->will_exit_fast_recovery) {
5258			/* Ok, we must exit fast recovery */
5259			net->fast_retran_loss_recovery = 0;
5260		}
5261	}
5262
5263	/* Adjust and set the new rwnd value */
5264#ifdef SCTP_LOG_RWND
5265	sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5266	    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
5267#endif
5268
5269	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5270	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
5271	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5272		/* SWS sender side engages */
5273		asoc->peers_rwnd = 0;
5274	}
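	/*
	 * Worked example of the rwnd bookkeeping above (hypothetical
	 * values): a_rwnd = 65536, total_flight = 60000, sent_queue_cnt =
	 * 10 and sctp_peer_chunk_oh = 256 give peers_rwnd = 65536 - 62560
	 * = 2976; if that falls below sctp_sws_sender it is clamped to 0
	 * to avoid sender-side silly window syndrome.
	 */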
5275	/*
5276	 * Now we must setup so we have a timer up for anyone with
5277	 * outstanding data.
5278	 */
5279again:
5280	j = 0;
5281	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5282		if (net->flight_size) {
5283			j++;
5284			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5285			    stcb->sctp_ep, stcb, net);
5286		}
5287	}
5288	if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
5289		/* huh, this should not happen */
5290#ifdef INVARIANTS
5291		panic("Flight size incorrect? fixing??");
5292#else
5293		printf("Flight size incorrect? fixing??\n");
5294		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5295			net->flight_size = 0;
5296		}
5297		asoc->total_flight = 0;
5298		asoc->total_flight_count = 0;
5299		asoc->sent_queue_retran_cnt = 0;
5300		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5301			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5302				tp1->whoTo->flight_size += tp1->book_size;
5303				asoc->total_flight += tp1->book_size;
5304				asoc->total_flight_count++;
5305			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5306				asoc->sent_queue_retran_cnt++;
5307			}
5308		}
5309#endif
5310		goto again;
5311	}
5312#ifdef SCTP_SACK_RWND_LOGGING
5313	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5314	    a_rwnd,
5315	    stcb->asoc.peers_rwnd,
5316	    stcb->asoc.total_flight,
5317	    stcb->asoc.total_output_queue_size);
5318
5319#endif
5320
5321}
5322
5323void
5324sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5325    struct sctp_nets *netp, int *abort_flag)
5326{
5327	/* Copy cum-ack */
5328	uint32_t cum_ack, a_rwnd;
5329
5330	cum_ack = ntohl(cp->cumulative_tsn_ack);
5331	/* Arrange so a_rwnd does NOT change */
5332	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
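	/*
	 * Why this works (illustrative numbers): the express handler will
	 * recompute peers_rwnd as roughly a_rwnd minus what is in flight,
	 * so with peers_rwnd = 10000 and total_flight = 4000 we pass
	 * a_rwnd = 14000 and it arrives back at 14000 - 4000 = 10000.
	 */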
5333
5334	/* Now call the express sack handling */
5335	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5336}
5337
5338static void
5339sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5340    struct sctp_stream_in *strmin)
5341{
5342	struct sctp_queued_to_read *ctl, *nctl;
5343	struct sctp_association *asoc;
5344	int tt;
5345
5346	asoc = &stcb->asoc;
5347	tt = strmin->last_sequence_delivered;
5348	/*
5349	 * First deliver anything prior to and including the stream sequence
5350	 * number that came in.
5351	 */
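	/*
	 * Example run (hypothetical SSNs): if the forward-TSN pushed tt up
	 * to 7 and the inqueue holds SSNs 5, 6 and 9, this first pass
	 * delivers 5 and 6 and stops at 9; the in-order pass below then
	 * resumes at last_sequence_delivered + 1, leaving 9 queued until
	 * 8 arrives.
	 */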
5352	ctl = TAILQ_FIRST(&strmin->inqueue);
5353	while (ctl) {
5354		nctl = TAILQ_NEXT(ctl, next);
5355		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5356		    (tt == ctl->sinfo_ssn)) {
5357			/* this is deliverable now */
5358			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5359			/* subtract pending on streams */
5360			asoc->size_on_all_streams -= ctl->length;
5361			sctp_ucount_decr(asoc->cnt_on_all_streams);
5362			/* deliver it to at least the delivery-q */
5363			if (stcb->sctp_socket) {
5364				sctp_add_to_readq(stcb->sctp_ep, stcb,
5365				    ctl,
5366				    &stcb->sctp_socket->so_rcv, 1);
5367			}
5368		} else {
5369			/* no more delivery now. */
5370			break;
5371		}
5372		ctl = nctl;
5373	}
5374	/*
5375	 * now we must deliver things in the queue the normal way, if any
5376	 * are now ready.
5377	 */
5378	tt = strmin->last_sequence_delivered + 1;
5379	ctl = TAILQ_FIRST(&strmin->inqueue);
5380	while (ctl) {
5381		nctl = TAILQ_NEXT(ctl, next);
5382		if (tt == ctl->sinfo_ssn) {
5383			/* this is deliverable now */
5384			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5385			/* subtract pending on streams */
5386			asoc->size_on_all_streams -= ctl->length;
5387			sctp_ucount_decr(asoc->cnt_on_all_streams);
5388			/* deliver it to at least the delivery-q */
5389			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5390			if (stcb->sctp_socket) {
5391				sctp_add_to_readq(stcb->sctp_ep, stcb,
5392				    ctl,
5393				    &stcb->sctp_socket->so_rcv, 1);
5394			}
5395			tt = strmin->last_sequence_delivered + 1;
5396		} else {
5397			break;
5398		}
5399		ctl = nctl;
5400	}
5401}
5402
5403void
5404sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5405    struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
5406{
5407	/*
5408	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5409	 * forward TSN, when the SACK comes back that acknowledges the
5410	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5411	 * get quite tricky since we may have sent more intervening data
5412	 * and must carefully account for what the SACK says on the nonce
5413	 * and any gaps that are reported. This work will NOT be done here,
5414	 * but I note it here since it is really related to PR-SCTP and
5415	 * FWD-TSN's
5416	 */
5417
5418	/* The pr-sctp fwd tsn */
5419	/*
5420	 * here we will perform all the data receiver side steps for
5421	 * here we will perform all the data receiver side steps for
5422	 * processing FwdTSN, as required by the PR-SCTP draft:
5423	 *
5424	 * Assume we get FwdTSN(x):
5425	 * 1) update local cumTSN to x,
5426	 * 2) try to further advance cumTSN to x + others we have,
5427	 * 3) examine and update re-ordering queue on pr-in-streams,
5428	 * 4) clean up re-assembly queue, 5) send a SACK to report where we are.
5429	 */
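	/*
	 * Concrete sketch (hypothetical values): with local cumTSN = 1000,
	 * an arriving FwdTSN(1010) marks TSNs 1001..1010 as received,
	 * tosses reassembly-queue fragments at or below 1010, and kicks
	 * the per-stream re-ordering queues named in the chunk.
	 */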
5430	struct sctp_strseq *stseq;
5431	struct sctp_association *asoc;
5432	uint32_t new_cum_tsn, gap, back_out_htsn;
5433	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5434	struct sctp_stream_in *strm;
5435	struct sctp_tmit_chunk *chk, *at;
5436
5437	cumack_set_flag = 0;
5438	asoc = &stcb->asoc;
5439	cnt_gone = 0;
5440	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5441#ifdef SCTP_DEBUG
5442		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
5443			printf("Bad fwd-tsn size (too small)\n");
5444		}
5445#endif
5446		return;
5447	}
5448	m_size = (stcb->asoc.mapping_array_size << 3);
5449	/*************************************************************/
5450	/* 1. Here we update local cumTSN and shift the bitmap array */
5451	/*************************************************************/
5452	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5453
5454	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5455	    asoc->cumulative_tsn == new_cum_tsn) {
5456		/* Already got there ... */
5457		return;
5458	}
5459	back_out_htsn = asoc->highest_tsn_inside_map;
5460	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5461	    MAX_TSN)) {
5462		asoc->highest_tsn_inside_map = new_cum_tsn;
5463#ifdef SCTP_MAP_LOGGING
5464		sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5465#endif
5466	}
5467	/*
5468	 * now we know the new TSN is more advanced, let's find the actual
5469	 * gap
5470	 */
5471	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
5472	    MAX_TSN)) ||
5473	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5474		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5475	} else {
5476		/* try to prevent underflow here */
5477		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5478	}
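	/*
	 * Worked example of the wrap branch (hypothetical TSNs): with
	 * mapping_array_base_tsn = 0xfffffff0 and new_cum_tsn = 0x00000004,
	 * straight subtraction would underflow, so gap = 0x4 +
	 * (0xffffffff - 0xfffffff0) + 1 = 20, i.e. twenty TSN slots past
	 * the base.
	 */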
5479
5480	if (gap > m_size) {	/* gap is unsigned; a "negative" gap wraps to a huge value */
5481		asoc->highest_tsn_inside_map = back_out_htsn;
5482		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5483			/*
5484			 * out of range (of the single-byte chunks in the rwnd
5485			 * I give out); too questionable, better to drop it
5486			 * silently
5487			 */
5488			return;
5489		}
5490		if (asoc->highest_tsn_inside_map >
5491		    asoc->mapping_array_base_tsn) {
5492			gap = asoc->highest_tsn_inside_map -
5493			    asoc->mapping_array_base_tsn;
5494		} else {
5495			gap = asoc->highest_tsn_inside_map +
5496			    (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5497		}
5498		cumack_set_flag = 1;
5499	}
5500	for (i = 0; i <= gap; i++) {
5501		SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
5502	}
5503	/*
5504	 * Now after marking all, slide thing forward but no sack please.
5505	 */
5506	sctp_sack_check(stcb, 0, 0, abort_flag);
5507	if (*abort_flag)
5508		return;
5509
5510	if (cumack_set_flag) {
5511		/*
5512		 * fwd-tsn went outside my gap array - not a common
5513		 * occurance. Do the same thing we do when a cookie-echo
5514		 * occurrence. Do the same thing we do when a cookie-echo
5515		 */
5516		asoc->highest_tsn_inside_map = new_cum_tsn - 1;
5517		asoc->mapping_array_base_tsn = new_cum_tsn;
5518		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
5519#ifdef SCTP_MAP_LOGGING
5520		sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5521#endif
5522		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5523	}
5524	/*************************************************************/
5525	/* 2. Clear up re-assembly queue                             */
5526	/*************************************************************/
5527
5528	/*
5529	 * First service it if pd-api is up, just in case we can progress it
5530	 * forward
5531	 */
5532	if (asoc->fragmented_delivery_inprogress) {
5533		sctp_service_reassembly(stcb, asoc);
5534	}
5535	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5536		/* For each one on here see if we need to toss it */
5537		/*
5538		 * For now, large messages held on the reasmqueue that are
5539		 * complete will be tossed too. We could in theory do more
5540		 * work to spin through and stop after dumping one msg (i.e.,
5541		 * on seeing the start of a new msg at the head) and call the
5542		 * delivery function to see if it can be delivered. But for
5543		 * now we just dump everything on the queue.
5544		 */
5545		chk = TAILQ_FIRST(&asoc->reasmqueue);
5546		while (chk) {
5547			at = TAILQ_NEXT(chk, sctp_next);
5548			if (compare_with_wrap(asoc->cumulative_tsn,
5549			    chk->rec.data.TSN_seq, MAX_TSN) ||
5550			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
5551				/* It needs to be tossed */
5552				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5553				if (compare_with_wrap(chk->rec.data.TSN_seq,
5554				    asoc->tsn_last_delivered, MAX_TSN)) {
5555					asoc->tsn_last_delivered =
5556					    chk->rec.data.TSN_seq;
5557					asoc->str_of_pdapi =
5558					    chk->rec.data.stream_number;
5559					asoc->ssn_of_pdapi =
5560					    chk->rec.data.stream_seq;
5561					asoc->fragment_flags =
5562					    chk->rec.data.rcv_flags;
5563				}
5564				asoc->size_on_reasm_queue -= chk->send_size;
5565				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5566				cnt_gone++;
5567
5568				/* Clear up any stream problem */
5569				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5570				    SCTP_DATA_UNORDERED &&
5571				    (compare_with_wrap(chk->rec.data.stream_seq,
5572				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5573				    MAX_SEQ))) {
5574					/*
5575					 * We must advance this stream's
5576					 * sequence number if the chunk
5577					 * being skipped is not unordered.
5578					 * There is a chance that if the
5579					 * peer does not include the last
5580					 * fragment in its FWD-TSN we WILL
5581					 * have a problem here, since you
5582					 * would have a partial chunk in
5583					 * queue that may not be
5584					 * deliverable. Also, if a partial
5585					 * delivery API has started, the
5586					 * user may get a partial chunk and
5587					 * the next read will return a new
5588					 * chunk... really ugly, but I see
5589					 * no way around it! Maybe a notify??
5590					 */
5591					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5592					    chk->rec.data.stream_seq;
5593				}
5594				if (chk->data) {
5595					sctp_m_freem(chk->data);
5596					chk->data = NULL;
5597				}
5598				sctp_free_remote_addr(chk->whoTo);
5599				sctp_free_a_chunk(stcb, chk);
5600			} else {
5601				/*
5602				 * Ok we have gone beyond the end of the
5603				 * fwd-tsn's mark. Some checks...
5604				 */
5605				if ((asoc->fragmented_delivery_inprogress) &&
5606				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5607					/*
5608					 * Special case: PD-API is up and
5609					 * what we fwd-tsn'd over includes
5610					 * one that had the LAST_FRAG. We no
5611					 * longer need to do the PD-API.
5612					 */
5613					asoc->fragmented_delivery_inprogress = 0;
5614					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5615					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
5616
5617				}
5618				break;
5619			}
5620			chk = at;
5621		}
5622	}
5623	if (asoc->fragmented_delivery_inprogress) {
5624		/*
5625		 * Ok we removed cnt_gone chunks in the PD-API queue that
5626		 * were being delivered. So now we must turn off the flag.
5627		 */
5628		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5629		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
5630		asoc->fragmented_delivery_inprogress = 0;
5631	}
5632	/*************************************************************/
5633	/* 3. Update the PR-stream re-ordering queues                */
5634	/*************************************************************/
5635	stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
5636	fwd_sz -= sizeof(*fwd);
5637	{
5638		/* New method. */
5639		int num_str, i;
5640
5641		num_str = fwd_sz / sizeof(struct sctp_strseq);
5642		for (i = 0; i < num_str; i++) {
5643			uint16_t st;
5644
5645			/* Convert stream and sequence to host byte order in place */
5646			st = ntohs(stseq[i].stream);
5647			stseq[i].stream = st;
5648			st = ntohs(stseq[i].sequence);
5649			stseq[i].sequence = st;
5652			/* now process */
5653			if (stseq[i].stream >= asoc->streamincnt) {
5654				/*
5655				 * It is arguable if we should continue.
5656				 * Since the peer sent bogus stream info we
5657				 * may be in deep trouble... a return may be
5658				 * a better choice?
5659				 */
5660				continue;
5661			}
5662			strm = &asoc->strmin[stseq[i].stream];
5663			if (compare_with_wrap(stseq[i].sequence,
5664			    strm->last_sequence_delivered, MAX_SEQ)) {
5665				/* Update the sequence number */
5666				strm->last_sequence_delivered =
5667				    stseq[i].sequence;
5668			}
5669			/* now kick the stream the new way */
5670			sctp_kick_prsctp_reorder_queue(stcb, strm);
5671		}
5672	}
5673	if (TAILQ_FIRST(&asoc->reasmqueue)) {
5674		/* now lets kick out and check for more fragmented delivery */
5675		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5676	}
5677}
5678