sctp_indata.c revision 189790
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 189790 2009-03-14 13:42:13Z rrs $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is) so it can be bundled with the outbound data.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
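	/*
	 * Illustrative example (editorial, not from the original source):
	 * with 64 kB of space left in so_rcv, 8 kB held on the reassembly
	 * queue and 4 kB on the stream queues, the window computed here is
	 * 64 - 8 - 4 = 52 kB, less the control overhead subtracted below.
	 */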
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0, to keep SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
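	/*
	 * Editorial note: the DATA chunk flags live in the upper byte of
	 * sinfo_flags (hence the << 8 above), so that e.g.
	 * SCTP_DATA_UNORDERED (0x04) lines up with the application-visible
	 * SCTP_UNORDERED (0x0400).
	 */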
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
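	/*
	 * Resulting layout (sketch): [struct cmsghdr][pad][sctp_sndrcvinfo
	 * or sctp_extrcvinfo]. CMSG_DATA() yields the properly aligned
	 * pointer just past the header, which is where outinfo points.
	 */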
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	/* EY: anything delivered out-of-order gets tagged NR in the nr_map */
	uint32_t nr_tsn, nr_gap;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* the socket above is long gone or going away.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * EY: this is the chunk that should be tagged NR; calculate
		 * the gap and then tag this TSN (chk->rec.data.TSN_seq) NR
		 */
		/*
		 * EY!-TODO- this tsn should be tagged NR only if it is
		 * out-of-order; the if statement should be modified
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

			nr_tsn = chk->rec.data.TSN_seq;
			if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
				nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
			} else {
				nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
			}
			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				/*
				 * EY: the 1st should never happen, since
				 * that check is done in
				 * sctp_process_a_data_chunk(); the 2nd
				 * should never happen, because
				 * nr_mapping_array is always expanded when
				 * mapping_array is expanded
				 */
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				if (nr_tsn > asoc->highest_tsn_inside_nr_map)
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						/*
						 * EY: will be used to
						 * calculate the nr-gap
						 */
						nr_tsn = ctl->sinfo_tsn;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
						/*
						 * EY: now something is
						 * delivered, calculate the
						 * nr_gap and tag this tsn
						 * NR
						 */
						if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

							if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
								nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
							} else {
								nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
							}
							/*
							 * EY: the 1st should never happen, since that
							 * check is done in sctp_process_a_data_chunk();
							 * the 2nd should never happen, because
							 * nr_mapping_array is always expanded when
							 * mapping_array is expanded
							 */
							if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
							    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
								/* see above; should never happen */
							} else {
								SCTP_TCB_LOCK_ASSERT(stcb);
								SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
								if (nr_tsn > asoc->highest_tsn_inside_nr_map)
									asoc->highest_tsn_inside_nr_map = nr_tsn;
							}
						}
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	/* EY: will be used to calculate the nr-gap for a tsn */
	uint32_t nr_tsn, nr_gap;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
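	/*
	 * Editorial note: compare_with_wrap(a, b, MAX_SEQ) is true when 'a'
	 * is logically greater than 'b' in wrap-around (serial-number)
	 * arithmetic on the 16-bit stream sequence space.
	 */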
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
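		/*
		 * Build the operational-error cause carried in the ABORT: a
		 * protocol-violation paramhdr followed by three 32-bit
		 * words identifying the detection point (SCTP_LOC_*), the
		 * TSN, and the stream/SSN pair. The same pattern repeats
		 * throughout this file.
		 */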
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		/* EY: will be used to calculate the nr-gap */
		nr_tsn = control->sinfo_tsn;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);

		/*
		 * EY: this is the chunk that should be tagged NR; calculate
		 * the gap and then tag this TSN NR
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

			if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
				nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
			} else {
				nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
			}
			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				/*
				 * EY: the 1st should never happen, since
				 * that check is done in
				 * sctp_process_a_data_chunk(); the 2nd
				 * should never happen, because
				 * nr_mapping_array is always expanded when
				 * mapping_array is expanded
				 */
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				if (nr_tsn > asoc->highest_tsn_inside_nr_map)
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				/* EY: will be used to calculate the nr-gap */
				nr_tsn = control->sinfo_tsn;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
				/*
				 * EY: this is the chunk that should be
				 * tagged NR; calculate the gap and then tag
				 * this TSN NR
				 */
				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
					} else {
						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
					}
					/*
					 * EY: the 1st should never happen,
					 * since that check is done in
					 * sctp_process_a_data_chunk(); the
					 * 2nd should never happen, because
					 * nr_mapping_array is always
					 * expanded when mapping_array is
					 * expanded
					 */
					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
						/* see above; should never happen */
					} else {
						SCTP_TCB_LOCK_ASSERT(stcb);
						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
						if (nr_tsn > asoc->highest_tsn_inside_nr_map)
							asoc->highest_tsn_inside_nr_map = nr_tsn;
					}
				}
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
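	/*
	 * Walk the queue: fragments of one message must carry strictly
	 * consecutive TSNs, so any gap means the message is incomplete and
	 * we stop counting.
	 */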
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver,
			 * but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
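				/*
				 * Either the whole message is here, or
				 * enough of it has accumulated to cross the
				 * partial-delivery threshold, so start
				 * handing it up through the PD-API.
				 */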

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could there be
			 * more waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
					    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last; it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;

	/* EY - for nr_sack */
	uint32_t nr_gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
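	/*
	 * Editorial note: SCTP_DATA_SACK_IMMEDIATELY is the 'I' bit from
	 * the SACK-IMMEDIATELY extension (an Internet-Draft at the time of
	 * this revision); the sender sets it to request a SACK without
	 * delay.
	 */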
	protocol_id = ch->dp.protocol_id;
	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
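	/*
	 * Worked example (editorial): with mapping_array_base_tsn =
	 * 0xfffffffe and tsn = 1 the TSN has wrapped, so gap =
	 * (0xffffffff - 0xfffffffe) + 1 + 1 = 3, i.e. this TSN occupies
	 * bit index 3 of the mapping array.
	 */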
1616	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1617		/* Can't hold the bit in the mapping at max array, toss it */
1618		return (0);
1619	}
1620	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1621		SCTP_TCB_LOCK_ASSERT(stcb);
1622		if (sctp_expand_mapping_array(asoc, gap)) {
1623			/* Can't expand, drop it */
1624			return (0);
1625		}
1626	}
1627	/* EY - for nr_sack */
1628	nr_gap = gap;
1629
1630	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1631		*high_tsn = tsn;
1632	}
1633	/* See if we have received this one already */
1634	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1635		SCTP_STAT_INCR(sctps_recvdupdata);
1636		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1637			/* Record a dup for the next outbound sack */
1638			asoc->dup_tsns[asoc->numduptsns] = tsn;
1639			asoc->numduptsns++;
1640		}
1641		asoc->send_sack = 1;
1642		return (0);
1643	}
1644	/*
1645	 * Check to see about the GONE flag, duplicates would cause a sack
1646	 * to be sent up above
1647	 */
1648	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1649	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1650	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1651	    ) {
1652		/*
1653		 * wait a minute, this guy is gone, there is no longer a
1654		 * receiver. Send peer an ABORT!
1655		 */
1656		struct mbuf *op_err;
1657
1658		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1659		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1660		*abort_flag = 1;
1661		return (0);
1662	}
1663	/*
1664	 * Now before going further we see if there is room. If NOT then we
1665	 * MAY let one through only IF this TSN is the one we are waiting
1666	 * for on a partial delivery API.
1667	 */
1668
1669	/* now do the tests */
1670	if (((asoc->cnt_on_all_streams +
1671	    asoc->cnt_on_reasm_queue +
1672	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1673	    (((int)asoc->my_rwnd) <= 0)) {
1674		/*
1675		 * When we have NO room in the rwnd we check to make sure
1676		 * the reader is doing its job...
1677		 */
1678		if (stcb->sctp_socket->so_rcv.sb_cc) {
1679			/* some to read, wake-up */
1680#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1681			struct socket *so;
1682
1683			so = SCTP_INP_SO(stcb->sctp_ep);
1684			atomic_add_int(&stcb->asoc.refcnt, 1);
1685			SCTP_TCB_UNLOCK(stcb);
1686			SCTP_SOCKET_LOCK(so, 1);
1687			SCTP_TCB_LOCK(stcb);
1688			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1689			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1690				/* assoc was freed while we were unlocked */
1691				SCTP_SOCKET_UNLOCK(so, 1);
1692				return (0);
1693			}
1694#endif
1695			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1696#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1697			SCTP_SOCKET_UNLOCK(so, 1);
1698#endif
1699		}
1700		/* now is it in the mapping array of what we have accepted? */
1701		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* Nope, not in the valid range; dump it */
1703			sctp_set_rwnd(stcb, asoc);
1704			if ((asoc->cnt_on_all_streams +
1705			    asoc->cnt_on_reasm_queue +
1706			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1707				SCTP_STAT_INCR(sctps_datadropchklmt);
1708			} else {
1709				SCTP_STAT_INCR(sctps_datadroprwnd);
1710			}
1711			indx = *break_flag;
1712			*break_flag = 1;
1713			return (0);
1714		}
1715	}
1716	strmno = ntohs(ch->dp.stream_id);
1717	if (strmno >= asoc->streamincnt) {
1718		struct sctp_paramhdr *phdr;
1719		struct mbuf *mb;
1720
1721		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1722		    0, M_DONTWAIT, 1, MT_DATA);
1723		if (mb != NULL) {
1724			/* add some space up front so prepend will work well */
1725			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1726			phdr = mtod(mb, struct sctp_paramhdr *);
1727			/*
1728			 * Error causes are just param's and this one has
1729			 * two back to back phdr, one with the error type
1730			 * and size, the other with the streamid and a rsvd
1731			 */
1732			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1733			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1734			phdr->param_length =
1735			    htons(sizeof(struct sctp_paramhdr) * 2);
1736			phdr++;
1737			/* We insert the stream in the type field */
1738			phdr->param_type = ch->dp.stream_id;
1739			/* And set the length to 0 for the rsvd field */
1740			phdr->param_length = 0;
1741			sctp_queue_op_err(stcb, mb);
1742		}
1743		SCTP_STAT_INCR(sctps_badsid);
1744		SCTP_TCB_LOCK_ASSERT(stcb);
1745		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
		/* EY: set this TSN present in nr_sack's nr_mapping_array */
1747		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1748			SCTP_TCB_LOCK_ASSERT(stcb);
1749			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1750		}
1751		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1752			/* we have a new high score */
1753			asoc->highest_tsn_inside_map = tsn;
1754			/* EY nr_sack version of the above */
1755			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1756				asoc->highest_tsn_inside_nr_map = tsn;
1757			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1758				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1759			}
1760		}
1761		if (tsn == (asoc->cumulative_tsn + 1)) {
1762			/* Update cum-ack */
1763			asoc->cumulative_tsn = tsn;
1764		}
1765		return (0);
1766	}
1767	/*
1768	 * Before we continue lets validate that we are not being fooled by
1769	 * an evil attacker. We can only have 4k chunks based on our TSN
1770	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1771	 * way our stream sequence numbers could have wrapped. We of course
1772	 * only validate the FIRST fragment so the bit must be set.
1773	 */
1774	strmseq = ntohs(ch->dp.stream_sequence);
1775#ifdef SCTP_ASOCLOG_OF_TSNS
1776	SCTP_TCB_LOCK_ASSERT(stcb);
1777	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1778		asoc->tsn_in_at = 0;
1779		asoc->tsn_in_wrapped = 1;
1780	}
1781	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1782	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1783	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1784	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1785	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1786	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1787	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1788	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1789	asoc->tsn_in_at++;
1790#endif
1791	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1792	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1793	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1794	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1795	    strmseq, MAX_SEQ) ||
1796	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1797		/* The incoming sseq is behind where we last delivered? */
1798		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1799		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1800		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1801		    0, M_DONTWAIT, 1, MT_DATA);
1802		if (oper) {
1803			struct sctp_paramhdr *ph;
1804			uint32_t *ippp;
1805
1806			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1807			    (3 * sizeof(uint32_t));
1808			ph = mtod(oper, struct sctp_paramhdr *);
1809			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1810			ph->param_length = htons(SCTP_BUF_LEN(oper));
1811			ippp = (uint32_t *) (ph + 1);
1812			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1813			ippp++;
1814			*ippp = tsn;
1815			ippp++;
1816			*ippp = ((strmno << 16) | strmseq);
1817
1818		}
1819		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1820		sctp_abort_an_association(stcb->sctp_ep, stcb,
1821		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1822		*abort_flag = 1;
1823		return (0);
1824	}
1825	/************************************
1826	 * From here down we may find ch-> invalid
1827	 * so its a good idea NOT to use it.
1828	 *************************************/
1829
1830	the_len = (chk_length - sizeof(struct sctp_data_chunk));
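	/*
	 * the_len is the user payload only: the chunk length minus the
	 * 16-byte DATA chunk header (common chunk header, TSN, stream id,
	 * stream sequence and payload protocol id).
	 */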
1831	if (last_chunk == 0) {
1832		dmbuf = SCTP_M_COPYM(*m,
1833		    (offset + sizeof(struct sctp_data_chunk)),
1834		    the_len, M_DONTWAIT);
1835#ifdef SCTP_MBUF_LOGGING
1836		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1837			struct mbuf *mat;
1838
1839			mat = dmbuf;
1840			while (mat) {
1841				if (SCTP_BUF_IS_EXTENDED(mat)) {
1842					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1843				}
1844				mat = SCTP_BUF_NEXT(mat);
1845			}
1846		}
1847#endif
1848	} else {
1849		/* We can steal the last chunk */
1850		int l_len;
1851
1852		dmbuf = *m;
1853		/* lop off the top part */
1854		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1855		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1856			l_len = SCTP_BUF_LEN(dmbuf);
1857		} else {
1858			/*
1859			 * need to count up the size hopefully does not hit
1860			 * this to often :-0
1861			 */
1862			struct mbuf *lat;
1863
1864			l_len = 0;
1865			lat = dmbuf;
1866			while (lat) {
1867				l_len += SCTP_BUF_LEN(lat);
1868				lat = SCTP_BUF_NEXT(lat);
1869			}
1870		}
1871		if (l_len > the_len) {
			/* Trim the rounding bytes off the end too */
1873			m_adj(dmbuf, -(l_len - the_len));
1874		}
1875	}
1876	if (dmbuf == NULL) {
1877		SCTP_STAT_INCR(sctps_nomem);
1878		return (0);
1879	}
1880	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1881	    asoc->fragmented_delivery_inprogress == 0 &&
1882	    TAILQ_EMPTY(&asoc->resetHead) &&
1883	    ((ordered == 0) ||
1884	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1885	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1886		/* Candidate for express delivery */
1887		/*
1888		 * Its not fragmented, No PD-API is up, Nothing in the
1889		 * delivery queue, Its un-ordered OR ordered and the next to
1890		 * deliver AND nothing else is stuck on the stream queue,
1891		 * And there is room for it in the socket buffer. Lets just
1892		 * stuff it up the buffer....
1893		 */
1894
1895		/* It would be nice to avoid this copy if we could :< */
1896		sctp_alloc_a_readq(stcb, control);
1897		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1898		    protocol_id,
1899		    stcb->asoc.context,
1900		    strmno, strmseq,
1901		    chunk_flags,
1902		    dmbuf);
1903		if (control == NULL) {
1904			goto failed_express_del;
1905		}
1906		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1907
1908		/*
1909		 * EY here I should check if this delivered tsn is
1910		 * out_of_order, if yes then update the nr_map
1911		 */
1912		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1913			/*
1914			 * EY check if the mapping_array and nr_mapping
1915			 * array are consistent
1916			 */
			/*
			 * EY debugging: the map and nr_map base TSNs should
			 * be equal here; a commented-out printf used to dump
			 * the mapping/nr_mapping array sizes, base TSNs,
			 * highest TSNs, the TSN and the nr_gap when they
			 * were not.
			 */
			/* EY - not 100% sure about the lock thing */
1944			SCTP_TCB_LOCK_ASSERT(stcb);
1945			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1946			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1947				asoc->highest_tsn_inside_nr_map = tsn;
1948		}
1949		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1950			/* for ordered, bump what we delivered */
1951			asoc->strmin[strmno].last_sequence_delivered++;
1952		}
1953		SCTP_STAT_INCR(sctps_recvexpress);
1954		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1955			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1956			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1957		}
1958		control = NULL;
1959		goto finish_express_del;
1960	}
1961failed_express_del:
1962	/* If we reach here this is a new chunk */
1963	chk = NULL;
1964	control = NULL;
1965	/* Express for fragmented delivery? */
1966	if ((asoc->fragmented_delivery_inprogress) &&
1967	    (stcb->asoc.control_pdapi) &&
1968	    (asoc->str_of_pdapi == strmno) &&
1969	    (asoc->ssn_of_pdapi == strmseq)
1970	    ) {
1971		control = stcb->asoc.control_pdapi;
1972		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1973			/* Can't be another first? */
1974			goto failed_pdapi_express_del;
1975		}
1976		if (tsn == (control->sinfo_tsn + 1)) {
1977			/* Yep, we can add it on */
1978			int end = 0;
1979			uint32_t cumack;
1980
1981			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1982				end = 1;
1983			}
1984			cumack = asoc->cumulative_tsn;
1985			if ((cumack + 1) == tsn)
1986				cumack = tsn;
1987
1988			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1989			    tsn,
1990			    &stcb->sctp_socket->so_rcv)) {
1991				SCTP_PRINTF("Append fails end:%d\n", end);
1992				goto failed_pdapi_express_del;
1993			}
1994			/*
1995			 * EY It is appended to the read queue in prev if
1996			 * block here I should check if this delivered tsn
1997			 * is out_of_order, if yes then update the nr_map
1998			 */
1999			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
				/*
				 * EY debugging: a commented-out printf used
				 * to dump the mapping/nr_mapping array sizes,
				 * base TSNs, highest TSNs, the TSN and the
				 * nr_gap here.
				 */
				/* EY - not 100% sure about the lock thing */
2021				SCTP_TCB_LOCK_ASSERT(stcb);
2022				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2023				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2024					asoc->highest_tsn_inside_nr_map = tsn;
2025			}
2026			SCTP_STAT_INCR(sctps_recvexpressm);
2027			control->sinfo_tsn = tsn;
2028			asoc->tsn_last_delivered = tsn;
2029			asoc->fragment_flags = chunk_flags;
2030			asoc->tsn_of_pdapi_last_delivered = tsn;
2031			asoc->last_flags_delivered = chunk_flags;
2032			asoc->last_strm_seq_delivered = strmseq;
2033			asoc->last_strm_no_delivered = strmno;
2034			if (end) {
2035				/* clean up the flags and such */
2036				asoc->fragmented_delivery_inprogress = 0;
2037				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2038					asoc->strmin[strmno].last_sequence_delivered++;
2039				}
2040				stcb->asoc.control_pdapi = NULL;
2041				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2042					/*
2043					 * There could be another message
2044					 * ready
2045					 */
2046					need_reasm_check = 1;
2047				}
2048			}
2049			control = NULL;
2050			goto finish_express_del;
2051		}
2052	}
2053failed_pdapi_express_del:
2054	control = NULL;
2055	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2056		sctp_alloc_a_chunk(stcb, chk);
2057		if (chk == NULL) {
2058			/* No memory so we drop the chunk */
2059			SCTP_STAT_INCR(sctps_nomem);
2060			if (last_chunk == 0) {
2061				/* we copied it, free the copy */
2062				sctp_m_freem(dmbuf);
2063			}
2064			return (0);
2065		}
2066		chk->rec.data.TSN_seq = tsn;
2067		chk->no_fr_allowed = 0;
2068		chk->rec.data.stream_seq = strmseq;
2069		chk->rec.data.stream_number = strmno;
2070		chk->rec.data.payloadtype = protocol_id;
2071		chk->rec.data.context = stcb->asoc.context;
2072		chk->rec.data.doing_fast_retransmit = 0;
2073		chk->rec.data.rcv_flags = chunk_flags;
2074		chk->asoc = asoc;
2075		chk->send_size = the_len;
2076		chk->whoTo = net;
2077		atomic_add_int(&net->ref_count, 1);
2078		chk->data = dmbuf;
2079	} else {
2080		sctp_alloc_a_readq(stcb, control);
2081		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2082		    protocol_id,
2083		    stcb->asoc.context,
2084		    strmno, strmseq,
2085		    chunk_flags,
2086		    dmbuf);
2087		if (control == NULL) {
2088			/* No memory so we drop the chunk */
2089			SCTP_STAT_INCR(sctps_nomem);
2090			if (last_chunk == 0) {
2091				/* we copied it, free the copy */
2092				sctp_m_freem(dmbuf);
2093			}
2094			return (0);
2095		}
2096		control->length = the_len;
2097	}
2098
2099	/* Mark it as received */
2100	/* Now queue it where it belongs */
2101	if (control != NULL) {
2102		/* First a sanity check */
2103		if (asoc->fragmented_delivery_inprogress) {
2104			/*
2105			 * Ok, we have a fragmented delivery in progress if
2106			 * this chunk is next to deliver OR belongs in our
2107			 * view to the reassembly, the peer is evil or
2108			 * broken.
2109			 */
2110			uint32_t estimate_tsn;
2111
2112			estimate_tsn = asoc->tsn_last_delivered + 1;
2113			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2114			    (estimate_tsn == control->sinfo_tsn)) {
				/* Evil/Broken peer */
2116				sctp_m_freem(control->data);
2117				control->data = NULL;
2118				if (control->whoFrom) {
2119					sctp_free_remote_addr(control->whoFrom);
2120					control->whoFrom = NULL;
2121				}
2122				sctp_free_a_readq(stcb, control);
2123				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2124				    0, M_DONTWAIT, 1, MT_DATA);
2125				if (oper) {
2126					struct sctp_paramhdr *ph;
2127					uint32_t *ippp;
2128
2129					SCTP_BUF_LEN(oper) =
2130					    sizeof(struct sctp_paramhdr) +
2131					    (3 * sizeof(uint32_t));
2132					ph = mtod(oper, struct sctp_paramhdr *);
2133					ph->param_type =
2134					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2135					ph->param_length = htons(SCTP_BUF_LEN(oper));
2136					ippp = (uint32_t *) (ph + 1);
2137					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2138					ippp++;
2139					*ippp = tsn;
2140					ippp++;
2141					*ippp = ((strmno << 16) | strmseq);
2142				}
2143				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2144				sctp_abort_an_association(stcb->sctp_ep, stcb,
2145				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2146
2147				*abort_flag = 1;
2148				return (0);
2149			} else {
2150				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2151					sctp_m_freem(control->data);
2152					control->data = NULL;
2153					if (control->whoFrom) {
2154						sctp_free_remote_addr(control->whoFrom);
2155						control->whoFrom = NULL;
2156					}
2157					sctp_free_a_readq(stcb, control);
2158
2159					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2160					    0, M_DONTWAIT, 1, MT_DATA);
2161					if (oper) {
2162						struct sctp_paramhdr *ph;
2163						uint32_t *ippp;
2164
2165						SCTP_BUF_LEN(oper) =
2166						    sizeof(struct sctp_paramhdr) +
2167						    (3 * sizeof(uint32_t));
2168						ph = mtod(oper,
2169						    struct sctp_paramhdr *);
2170						ph->param_type =
2171						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2172						ph->param_length =
2173						    htons(SCTP_BUF_LEN(oper));
2174						ippp = (uint32_t *) (ph + 1);
2175						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2176						ippp++;
2177						*ippp = tsn;
2178						ippp++;
2179						*ippp = ((strmno << 16) | strmseq);
2180					}
2181					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2182					sctp_abort_an_association(stcb->sctp_ep,
2183					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2184
2185					*abort_flag = 1;
2186					return (0);
2187				}
2188			}
2189		} else {
2190			/* No PDAPI running */
2191			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2192				/*
2193				 * Reassembly queue is NOT empty validate
2194				 * that this tsn does not need to be in
2195				 * reasembly queue. If it does then our peer
2196				 * is broken or evil.
2197				 */
2198				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2199					sctp_m_freem(control->data);
2200					control->data = NULL;
2201					if (control->whoFrom) {
2202						sctp_free_remote_addr(control->whoFrom);
2203						control->whoFrom = NULL;
2204					}
2205					sctp_free_a_readq(stcb, control);
2206					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2207					    0, M_DONTWAIT, 1, MT_DATA);
2208					if (oper) {
2209						struct sctp_paramhdr *ph;
2210						uint32_t *ippp;
2211
2212						SCTP_BUF_LEN(oper) =
2213						    sizeof(struct sctp_paramhdr) +
2214						    (3 * sizeof(uint32_t));
2215						ph = mtod(oper,
2216						    struct sctp_paramhdr *);
2217						ph->param_type =
2218						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2219						ph->param_length =
2220						    htons(SCTP_BUF_LEN(oper));
2221						ippp = (uint32_t *) (ph + 1);
2222						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2223						ippp++;
2224						*ippp = tsn;
2225						ippp++;
2226						*ippp = ((strmno << 16) | strmseq);
2227					}
2228					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2229					sctp_abort_an_association(stcb->sctp_ep,
2230					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2231
2232					*abort_flag = 1;
2233					return (0);
2234				}
2235			}
2236		}
2237		/* ok, if we reach here we have passed the sanity checks */
2238		if (chunk_flags & SCTP_DATA_UNORDERED) {
2239			/* queue directly into socket buffer */
2240			sctp_add_to_readq(stcb->sctp_ep, stcb,
2241			    control,
2242			    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2243
2244			/*
2245			 * EY It is added to the read queue in prev if block
2246			 * here I should check if this delivered tsn is
2247			 * out_of_order, if yes then update the nr_map
2248			 */
2249			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2250				/*
2251				 * EY check if the mapping_array and
2252				 * nr_mapping array are consistent
2253				 */
				/*
				 * EY debugging: the map and nr_map base
				 * TSNs should be equal here; a commented-out
				 * printf used to dump the mapping/nr_mapping
				 * array state when they were not.  EY was
				 * not 100% sure the lock assert below is
				 * needed.
				 */
2287				SCTP_TCB_LOCK_ASSERT(stcb);
2288				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2289				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2290					asoc->highest_tsn_inside_nr_map = tsn;
2291			}
2292		} else {
2293			/*
2294			 * Special check for when streams are resetting. We
2295			 * could be more smart about this and check the
2296			 * actual stream to see if it is not being reset..
2297			 * that way we would not create a HOLB when amongst
2298			 * streams being reset and those not being reset.
2299			 *
2300			 * We take complete messages that have a stream reset
2301			 * intervening (aka the TSN is after where our
2302			 * cum-ack needs to be) off and put them on a
2303			 * pending_reply_queue. The reassembly ones we do
2304			 * not have to worry about since they are all sorted
2305			 * and proceessed by TSN order. It is only the
2306			 * singletons I must worry about.
2307			 */
2308			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2309			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2310			    ) {
2311				/*
2312				 * yep its past where we need to reset... go
2313				 * ahead and queue it.
2314				 */
2315				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2316					/* first one on */
2317					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2318				} else {
2319					struct sctp_queued_to_read *ctlOn;
2320					unsigned char inserted = 0;
2321
2322					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2323					while (ctlOn) {
2324						if (compare_with_wrap(control->sinfo_tsn,
2325						    ctlOn->sinfo_tsn, MAX_TSN)) {
2326							ctlOn = TAILQ_NEXT(ctlOn, next);
2327						} else {
2328							/* found it */
2329							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2330							inserted = 1;
2331							break;
2332						}
2333					}
2334					if (inserted == 0) {
2335						/*
2336						 * must be put at end, use
2337						 * prevP (all setup from
2338						 * loop) to setup nextP.
2339						 */
2340						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2341					}
2342				}
2343			} else {
2344				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2345				if (*abort_flag) {
2346					return (0);
2347				}
2348			}
2349		}
2350	} else {
2351		/* Into the re-assembly queue */
2352		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2353		if (*abort_flag) {
2354			/*
2355			 * the assoc is now gone and chk was put onto the
2356			 * reasm queue, which has all been freed.
2357			 */
2358			*m = NULL;
2359			return (0);
2360		}
2361	}
2362finish_express_del:
2363	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2364		/* we have a new high score */
2365		asoc->highest_tsn_inside_map = tsn;
2366		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2367			sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2368		}
2369	}
2370	if (tsn == (asoc->cumulative_tsn + 1)) {
2371		/* Update cum-ack */
2372		asoc->cumulative_tsn = tsn;
2373	}
2374	if (last_chunk) {
2375		*m = NULL;
2376	}
2377	if (ordered) {
2378		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2379	} else {
2380		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2381	}
2382	SCTP_STAT_INCR(sctps_recvdata);
2383	/* Set it present please */
2384	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2385		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2386	}
2387	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2389		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2390	}
2391	SCTP_TCB_LOCK_ASSERT(stcb);
2392	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2393	/* check the special flag for stream resets */
2394	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2395	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2396	    (asoc->cumulative_tsn == liste->tsn))
2397	    ) {
2398		/*
2399		 * we have finished working through the backlogged TSN's now
2400		 * time to reset streams. 1: call reset function. 2: free
2401		 * pending_reply space 3: distribute any chunks in
2402		 * pending_reply_queue.
2403		 */
2404		struct sctp_queued_to_read *ctl;
2405
2406		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2407		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2408		SCTP_FREE(liste, SCTP_M_STRESET);
2409		/* sa_ignore FREED_MEMORY */
2410		liste = TAILQ_FIRST(&asoc->resetHead);
2411		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2412		if (ctl && (liste == NULL)) {
2413			/* All can be removed */
2414			while (ctl) {
2415				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2416				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2417				if (*abort_flag) {
2418					return (0);
2419				}
2420				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2421			}
2422		} else if (ctl) {
2423			/* more than one in queue */
2424			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2425				/*
2426				 * if ctl->sinfo_tsn is <= liste->tsn we can
2427				 * process it which is the NOT of
2428				 * ctl->sinfo_tsn > liste->tsn
2429				 */
2430				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2431				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2432				if (*abort_flag) {
2433					return (0);
2434				}
2435				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2436			}
2437		}
2438		/*
2439		 * Now service re-assembly to pick up anything that has been
2440		 * held on reassembly queue?
2441		 */
2442		sctp_deliver_reasm_check(stcb, asoc);
2443		need_reasm_check = 0;
2444	}
2445	if (need_reasm_check) {
2446		/* Another one waits ? */
2447		sctp_deliver_reasm_check(stcb, asoc);
2448	}
2449	return (1);
2450}
2451
2452int8_t sctp_map_lookup_tab[256] = {
2453	-1, 0, -1, 1, -1, 0, -1, 2,
2454	-1, 0, -1, 1, -1, 0, -1, 3,
2455	-1, 0, -1, 1, -1, 0, -1, 2,
2456	-1, 0, -1, 1, -1, 0, -1, 4,
2457	-1, 0, -1, 1, -1, 0, -1, 2,
2458	-1, 0, -1, 1, -1, 0, -1, 3,
2459	-1, 0, -1, 1, -1, 0, -1, 2,
2460	-1, 0, -1, 1, -1, 0, -1, 5,
2461	-1, 0, -1, 1, -1, 0, -1, 2,
2462	-1, 0, -1, 1, -1, 0, -1, 3,
2463	-1, 0, -1, 1, -1, 0, -1, 2,
2464	-1, 0, -1, 1, -1, 0, -1, 4,
2465	-1, 0, -1, 1, -1, 0, -1, 2,
2466	-1, 0, -1, 1, -1, 0, -1, 3,
2467	-1, 0, -1, 1, -1, 0, -1, 2,
2468	-1, 0, -1, 1, -1, 0, -1, 6,
2469	-1, 0, -1, 1, -1, 0, -1, 2,
2470	-1, 0, -1, 1, -1, 0, -1, 3,
2471	-1, 0, -1, 1, -1, 0, -1, 2,
2472	-1, 0, -1, 1, -1, 0, -1, 4,
2473	-1, 0, -1, 1, -1, 0, -1, 2,
2474	-1, 0, -1, 1, -1, 0, -1, 3,
2475	-1, 0, -1, 1, -1, 0, -1, 2,
2476	-1, 0, -1, 1, -1, 0, -1, 5,
2477	-1, 0, -1, 1, -1, 0, -1, 2,
2478	-1, 0, -1, 1, -1, 0, -1, 3,
2479	-1, 0, -1, 1, -1, 0, -1, 2,
2480	-1, 0, -1, 1, -1, 0, -1, 4,
2481	-1, 0, -1, 1, -1, 0, -1, 2,
2482	-1, 0, -1, 1, -1, 0, -1, 3,
2483	-1, 0, -1, 1, -1, 0, -1, 2,
2484	-1, 0, -1, 1, -1, 0, -1, 7,
2485};
2486
2487
2488void
2489sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2490{
2491	/*
2492	 * Now we also need to check the mapping array in a couple of ways.
2493	 * 1) Did we move the cum-ack point?
2494	 */
2495	struct sctp_association *asoc;
2496	int at;
2497	int last_all_ones = 0;
2498	int slide_from, slide_end, lgap, distance;
2499
2500	/* EY nr_mapping array variables */
2501	int nr_at;
2502	int nr_last_all_ones = 0;
2503	int nr_slide_from, nr_slide_end, nr_lgap, nr_distance;
2504
2505	uint32_t old_cumack, old_base, old_highest;
2506	unsigned char aux_array[64];
2507
2508	/*
2509	 * EY! Don't think this is required but I am immitating the code for
2510	 * map just to make sure
2511	 */
2512	unsigned char nr_aux_array[64];
2513
2514	asoc = &stcb->asoc;
2515	at = 0;
2516
2517	old_cumack = asoc->cumulative_tsn;
2518	old_base = asoc->mapping_array_base_tsn;
2519	old_highest = asoc->highest_tsn_inside_map;
2520	if (asoc->mapping_array_size < 64)
2521		memcpy(aux_array, asoc->mapping_array,
2522		    asoc->mapping_array_size);
2523	else
2524		memcpy(aux_array, asoc->mapping_array, 64);
2525	/* EY do the same for nr_mapping_array */
2526	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2527
		/*
		 * EY debugging: the sizes and base TSNs of map and nr_map
		 * should always be consistent; commented-out printfs used
		 * to flag the (very crucial) cases where they were not.
		 */
		/* EY! just imitating the above code */
		if (asoc->nr_mapping_array_size < 64)
			memcpy(nr_aux_array, asoc->nr_mapping_array,
			    asoc->nr_mapping_array_size);
		else
			memcpy(nr_aux_array, asoc->nr_mapping_array, 64);
2547	}
2548	/*
2549	 * We could probably improve this a small bit by calculating the
2550	 * offset of the current cum-ack as the starting point.
2551	 */
2552	at = 0;
2553	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2554
2555		if (asoc->mapping_array[slide_from] == 0xff) {
2556			at += 8;
2557			last_all_ones = 1;
2558		} else {
2559			/* there is a 0 bit */
2560			at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2561			last_all_ones = 0;
2562			break;
2563		}
2564	}
2565	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
	/* at is one off, since in the table an embedded -1 is present */
2567	at++;
2568
2569	if (compare_with_wrap(asoc->cumulative_tsn,
2570	    asoc->highest_tsn_inside_map,
2571	    MAX_TSN)) {
2572#ifdef INVARIANTS
2573		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2574		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2575#else
2576		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2577		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2578		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2579			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2580		}
2581		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2582		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2583#endif
2584	}
2585	if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2586		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
2588		int clr;
2589
2590		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2591		/* clear the array */
2592		clr = (at >> 3) + 1;
2593		if (clr > asoc->mapping_array_size) {
2594			clr = asoc->mapping_array_size;
2595		}
2596		memset(asoc->mapping_array, 0, clr);
2597		/* base becomes one ahead of the cum-ack */
2598		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2599
2600		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2601
2602			if (clr > asoc->nr_mapping_array_size)
2603				clr = asoc->nr_mapping_array_size;
2604
2605			memset(asoc->nr_mapping_array, 0, clr);
2606			/* base becomes one ahead of the cum-ack */
2607			asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2608			asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2609		}
2610		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2611			sctp_log_map(old_base, old_cumack, old_highest,
2612			    SCTP_MAP_PREPARE_SLIDE);
2613			sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2614			    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2615		}
2616	} else if (at >= 8) {
2617		/* we can slide the mapping array down */
2618		/* slide_from holds where we hit the first NON 0xff byte */
2619
2620		/*
2621		 * now calculate the ceiling of the move using our highest
2622		 * TSN value
2623		 */
2624		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2625			lgap = asoc->highest_tsn_inside_map -
2626			    asoc->mapping_array_base_tsn;
2627		} else {
2628			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2629			    asoc->highest_tsn_inside_map + 1;
2630		}
2631		slide_end = lgap >> 3;
2632		if (slide_end < slide_from) {
2633#ifdef INVARIANTS
2634			panic("impossible slide");
2635#else
2636			printf("impossible slide?\n");
2637			return;
2638#endif
2639		}
2640		if (slide_end > asoc->mapping_array_size) {
2641#ifdef INVARIANTS
2642			panic("would overrun buffer");
2643#else
2644			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2645			    asoc->mapping_array_size, slide_end);
2646			slide_end = asoc->mapping_array_size;
2647#endif
2648		}
2649		distance = (slide_end - slide_from) + 1;
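		/*
		 * Worked example (hypothetical values): if the first
		 * non-0xff byte is slide_from = 2 and the highest TSN gives
		 * lgap = 37, then slide_end = 37 >> 3 = 4 and distance = 3;
		 * bytes 2..4 are copied down to 0..2, the rest of the moved
		 * window is zeroed, and mapping_array_base_tsn advances by
		 * slide_from << 3 = 16 TSNs.
		 */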
2650		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2651			sctp_log_map(old_base, old_cumack, old_highest,
2652			    SCTP_MAP_PREPARE_SLIDE);
2653			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2654			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2655		}
2656		if (distance + slide_from > asoc->mapping_array_size ||
2657		    distance < 0) {
2658			/*
2659			 * Here we do NOT slide forward the array so that
2660			 * hopefully when more data comes in to fill it up
2661			 * we will be able to slide it forward. Really I
2662			 * don't think this should happen :-0
2663			 */
2664
2665			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2666				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2667				    (uint32_t) asoc->mapping_array_size,
2668				    SCTP_MAP_SLIDE_NONE);
2669			}
2670		} else {
2671			int ii;
2672
2673			for (ii = 0; ii < distance; ii++) {
2674				asoc->mapping_array[ii] =
2675				    asoc->mapping_array[slide_from + ii];
2676			}
2677			for (ii = distance; ii <= slide_end; ii++) {
2678				asoc->mapping_array[ii] = 0;
2679			}
2680			asoc->mapping_array_base_tsn += (slide_from << 3);
2681			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2682				sctp_log_map(asoc->mapping_array_base_tsn,
2683				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2684				    SCTP_MAP_SLIDE_RESULT);
2685			}
2686		}
2687	}
2688	/*
2689	 * EY if doing nr_sacks then slide the nr_mapping_array accordingly
2690	 * please
2691	 */
2692	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2693
2694		nr_at = 0;
2695		for (nr_slide_from = 0; nr_slide_from < stcb->asoc.nr_mapping_array_size; nr_slide_from++) {
2696
2697			if (asoc->nr_mapping_array[nr_slide_from] == 0xff) {
2698				nr_at += 8;
2699				nr_last_all_ones = 1;
2700			} else {
2701				/* there is a 0 bit */
2702				nr_at += sctp_map_lookup_tab[asoc->nr_mapping_array[nr_slide_from]];
2703				nr_last_all_ones = 0;
2704				break;
2705			}
2706		}
2707
2708		nr_at++;
2709
2710		if (compare_with_wrap(asoc->cumulative_tsn,
2711		    asoc->highest_tsn_inside_nr_map, MAX_TSN) && (at >= 8)) {
2712			/* The complete array was completed by a single FR */
			/* highest becomes the cum-ack */
2714			int clr;
2715
2716			clr = (nr_at >> 3) + 1;
2717
2718			if (clr > asoc->nr_mapping_array_size)
2719				clr = asoc->nr_mapping_array_size;
2720
2721			memset(asoc->nr_mapping_array, 0, clr);
2722			/* base becomes one ahead of the cum-ack */
2723			asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2724			asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2725
2726		} else if (nr_at >= 8) {
2727			/* we can slide the mapping array down */
			/* Calculate the new byte position we can move down */
2729
2730			/*
2731			 * now calculate the ceiling of the move using our
2732			 * highest TSN value
2733			 */
2734			if (asoc->highest_tsn_inside_nr_map >= asoc->nr_mapping_array_base_tsn) {
2735				nr_lgap = asoc->highest_tsn_inside_nr_map -
2736				    asoc->nr_mapping_array_base_tsn;
2737			} else {
2738				nr_lgap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) +
2739				    asoc->highest_tsn_inside_nr_map + 1;
2740			}
2741			nr_slide_end = nr_lgap >> 3;
2742			if (nr_slide_end < nr_slide_from) {
2743#ifdef INVARIANTS
2744				panic("impossible slide");
2745#else
2746				printf("impossible slide?\n");
2747				return;
2748#endif
2749			}
2750			if (nr_slide_end > asoc->nr_mapping_array_size) {
2751#ifdef INVARIANTS
2752				panic("would overrun buffer");
2753#else
2754				printf("Gak, would have overrun map end:%d nr_slide_end:%d\n",
2755				    asoc->nr_mapping_array_size, nr_slide_end);
2756				nr_slide_end = asoc->nr_mapping_array_size;
2757#endif
2758			}
2759			nr_distance = (nr_slide_end - nr_slide_from) + 1;
2760
2761			if (nr_distance + nr_slide_from > asoc->nr_mapping_array_size ||
2762			    nr_distance < 0) {
2763				/*
2764				 * Here we do NOT slide forward the array so
2765				 * that hopefully when more data comes in to
2766				 * fill it up we will be able to slide it
2767				 * forward. Really I don't think this should
2768				 * happen :-0
2769				 */
2770				;
2771			} else {
2772				int ii;
2773
2774				for (ii = 0; ii < nr_distance; ii++) {
2775					asoc->nr_mapping_array[ii] =
2776					    asoc->nr_mapping_array[nr_slide_from + ii];
2777				}
2778				for (ii = nr_distance; ii <= nr_slide_end; ii++) {
2779					asoc->nr_mapping_array[ii] = 0;
2780				}
2781				asoc->nr_mapping_array_base_tsn += (nr_slide_from << 3);
2782			}
2783		}
2784	}
2785	/*
2786	 * Now we need to see if we need to queue a sack or just start the
2787	 * timer (if allowed).
2788	 */
2789	if (ok_to_sack) {
2790		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2791			/*
2792			 * Ok special case, in SHUTDOWN-SENT case. here we
2793			 * maker sure SACK timer is off and instead send a
2794			 * SHUTDOWN and a SACK
2795			 */
2796			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2797				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2798				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2799			}
2800			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2801			/*
2802			 * EY if nr_sacks used then send an nr-sack , a sack
2803			 * otherwise
2804			 */
2805			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
2806				sctp_send_nr_sack(stcb);
2807			else
2808				sctp_send_sack(stcb);
2809		} else {
2810			int is_a_gap;
2811
2812			/* is there a gap now ? */
2813			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2814			    stcb->asoc.cumulative_tsn, MAX_TSN);
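			/*
			 * e.g. (hypothetical): cumulative_tsn 100 with
			 * highest_tsn_inside_map 103 means something in
			 * 101..103 is still missing (otherwise the cum-ack
			 * would have advanced), so this counts as a gap.
			 */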
2815
2816			/*
2817			 * CMT DAC algorithm: increase number of packets
2818			 * received since last ack
2819			 */
2820			stcb->asoc.cmt_dac_pkts_rcvd++;
2821
2822			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2823								 * SACK */
2824			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2825								 * longer is one */
2826			    (stcb->asoc.numduptsns) ||	/* we have dup's */
2827			    (is_a_gap) ||	/* is still a gap */
2828			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2829			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2830			    ) {
2831
2832				if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2833				    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2834				    (stcb->asoc.send_sack == 0) &&
2835				    (stcb->asoc.numduptsns == 0) &&
2836				    (stcb->asoc.delayed_ack) &&
2837				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2838
2839					/*
2840					 * CMT DAC algorithm: With CMT,
2841					 * delay acks even in the face of
2842					 *
2843					 * reordering. Therefore, if acks that
2844					 * do not have to be sent because of
2845					 * the above reasons, will be
2846					 * delayed. That is, acks that would
2847					 * have been sent due to gap reports
2848					 * will be delayed with DAC. Start
2849					 * the delayed ack timer.
2850					 */
2851					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2852					    stcb->sctp_ep, stcb, NULL);
2853				} else {
2854					/*
2855					 * Ok we must build a SACK since the
2856					 * timer is pending, we got our
2857					 * first packet OR there are gaps or
2858					 * duplicates.
2859					 */
2860					(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2861					/*
2862					 * EY if nr_sacks used then send an
2863					 * nr-sack , a sack otherwise
2864					 */
2865					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2866						sctp_send_nr_sack(stcb);
2867					else
2868						sctp_send_sack(stcb);
2869				}
2870			} else {
2871				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2872					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2873					    stcb->sctp_ep, stcb, NULL);
2874				}
2875			}
2876		}
2877	}
2878}
2879
2880void
2881sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2882{
2883	struct sctp_tmit_chunk *chk;
2884	uint32_t tsize;
2885	uint16_t nxt_todel;
2886
2887	if (asoc->fragmented_delivery_inprogress) {
2888		sctp_service_reassembly(stcb, asoc);
2889	}
	/* Can we proceed further, i.e., is the PD-API complete? */
2891	if (asoc->fragmented_delivery_inprogress) {
2892		/* no */
2893		return;
2894	}
2895	/*
2896	 * Now is there some other chunk I can deliver from the reassembly
2897	 * queue.
2898	 */
2899doit_again:
2900	chk = TAILQ_FIRST(&asoc->reasmqueue);
2901	if (chk == NULL) {
2902		asoc->size_on_reasm_queue = 0;
2903		asoc->cnt_on_reasm_queue = 0;
2904		return;
2905	}
2906	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2907	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2908	    ((nxt_todel == chk->rec.data.stream_seq) ||
2909	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2910		/*
2911		 * Yep the first one is here. We setup to start reception,
2912		 * by backing down the TSN just in case we can't deliver.
2913		 */
2914
2915		/*
2916		 * Before we start though either all of the message should
2917		 * be here or 1/4 the socket buffer max or nothing on the
2918		 * delivery queue and something can be delivered.
2919		 */
2920		if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2921		    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
2922			asoc->fragmented_delivery_inprogress = 1;
2923			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2924			asoc->str_of_pdapi = chk->rec.data.stream_number;
2925			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2926			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2927			asoc->fragment_flags = chk->rec.data.rcv_flags;
2928			sctp_service_reassembly(stcb, asoc);
2929			if (asoc->fragmented_delivery_inprogress == 0) {
2930				goto doit_again;
2931			}
2932		}
2933	}
2934}
2935
2936int
2937sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2938    struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2939    struct sctp_nets *net, uint32_t * high_tsn)
2940{
2941	struct sctp_data_chunk *ch, chunk_buf;
2942	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of DATA chunks processed */
2944	int stop_proc = 0;
2945	int chk_length, break_flag, last_chunk;
2946	int abort_flag = 0, was_a_gap = 0;
2947	struct mbuf *m;
2948
2949	/* set the rwnd */
2950	sctp_set_rwnd(stcb, &stcb->asoc);
2951
2952	m = *mm;
2953	SCTP_TCB_LOCK_ASSERT(stcb);
2954	asoc = &stcb->asoc;
2955	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2956	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2957		/* there was a gap before this data was processed */
2958		was_a_gap = 1;
2959	}
2960	/*
2961	 * setup where we got the last DATA packet from for any SACK that
2962	 * may need to go out. Don't bump the net. This is done ONLY when a
2963	 * chunk is assigned.
2964	 */
2965	asoc->last_data_chunk_from = net;
2966
2967	/*-
2968	 * Now before we proceed we must figure out if this is a wasted
2969	 * cluster... i.e. it is a small packet sent in and yet the driver
2970	 * underneath allocated a full cluster for it. If so we must copy it
2971	 * to a smaller mbuf and free up the cluster mbuf. This will help
2972	 * with cluster starvation. Note for __Panda__ we don't do this
2973	 * since it has clusters all the way down to 64 bytes.
2974	 */
2975	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2976		/* we only handle mbufs that are singletons.. not chains */
2977		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2978		if (m) {
			/* ok, let's see if we can copy the data up */
2980			caddr_t *from, *to;
2981
2982			/* get the pointers and copy */
2983			to = mtod(m, caddr_t *);
2984			from = mtod((*mm), caddr_t *);
2985			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2986			/* copy the length and free up the old */
2987			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2988			sctp_m_freem(*mm);
			/* success, back copy */
2990			*mm = m;
2991		} else {
2992			/* We are in trouble in the mbuf world .. yikes */
2993			m = *mm;
2994		}
2995	}
2996	/* get pointer to the first chunk header */
2997	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2998	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2999	if (ch == NULL) {
3000		return (1);
3001	}
3002	/*
3003	 * process all DATA chunks...
3004	 */
3005	*high_tsn = asoc->cumulative_tsn;
3006	break_flag = 0;
3007	asoc->data_pkts_seen++;
3008	while (stop_proc == 0) {
3009		/* validate chunk length */
3010		chk_length = ntohs(ch->ch.chunk_length);
3011		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
3013			stop_proc = 1;
3014			break;
3015		}
3016		if (ch->ch.chunk_type == SCTP_DATA) {
3017			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
3018				/*
3019				 * Need to send an abort since we had a
3020				 * invalid data chunk.
3021				 */
3022				struct mbuf *op_err;
3023
3024				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
3025				    0, M_DONTWAIT, 1, MT_DATA);
3026
3027				if (op_err) {
3028					struct sctp_paramhdr *ph;
3029					uint32_t *ippp;
3030
3031					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
3032					    (2 * sizeof(uint32_t));
3033					ph = mtod(op_err, struct sctp_paramhdr *);
3034					ph->param_type =
3035					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3036					ph->param_length = htons(SCTP_BUF_LEN(op_err));
3037					ippp = (uint32_t *) (ph + 1);
3038					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
3039					ippp++;
3040					*ippp = asoc->cumulative_tsn;
3041
3042				}
3043				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
3044				sctp_abort_association(inp, stcb, m, iphlen, sh,
3045				    op_err, 0, net->port);
3046				return (2);
3047			}
3048#ifdef SCTP_AUDITING_ENABLED
3049			sctp_audit_log(0xB1, 0);
3050#endif
3051			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
3052				last_chunk = 1;
3053			} else {
3054				last_chunk = 0;
3055			}
3056			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
3057			    chk_length, net, high_tsn, &abort_flag, &break_flag,
3058			    last_chunk)) {
3059				num_chunks++;
3060			}
3061			if (abort_flag)
3062				return (2);
3063
3064			if (break_flag) {
3065				/*
3066				 * Set because of out of rwnd space and no
3067				 * drop rep space left.
3068				 */
3069				stop_proc = 1;
3070				break;
3071			}
3072		} else {
3073			/* not a data chunk in the data region */
3074			switch (ch->ch.chunk_type) {
3075			case SCTP_INITIATION:
3076			case SCTP_INITIATION_ACK:
3077			case SCTP_SELECTIVE_ACK:
3078			case SCTP_NR_SELECTIVE_ACK:	/* EY */
3079			case SCTP_HEARTBEAT_REQUEST:
3080			case SCTP_HEARTBEAT_ACK:
3081			case SCTP_ABORT_ASSOCIATION:
3082			case SCTP_SHUTDOWN:
3083			case SCTP_SHUTDOWN_ACK:
3084			case SCTP_OPERATION_ERROR:
3085			case SCTP_COOKIE_ECHO:
3086			case SCTP_COOKIE_ACK:
3087			case SCTP_ECN_ECHO:
3088			case SCTP_ECN_CWR:
3089			case SCTP_SHUTDOWN_COMPLETE:
3090			case SCTP_AUTHENTICATION:
3091			case SCTP_ASCONF_ACK:
3092			case SCTP_PACKET_DROPPED:
3093			case SCTP_STREAM_RESET:
3094			case SCTP_FORWARD_CUM_TSN:
3095			case SCTP_ASCONF:
3096				/*
3097				 * Now, what do we do with KNOWN chunks that
3098				 * are NOT in the right place?
3099				 *
3100				 * For now, I do nothing but ignore them. We
3101				 * may later want to add sysctl stuff to
3102				 * switch out and do either an ABORT() or
3103				 * possibly process them.
3104				 */
3105				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3106					struct mbuf *op_err;
3107
3108					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3109					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3110					return (2);
3111				}
3112				break;
3113			default:
3114				/* unknown chunk type, use bit rules */
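				/*
				 * Per RFC 4960, the two high-order bits of
				 * an unrecognized chunk type encode its
				 * handling: bit 0x40 set means report the
				 * chunk in an ERROR, and bit 0x80 set means
				 * skip it and keep processing; with 0x80
				 * clear the rest of the packet is discarded,
				 * as coded below.
				 */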
3115				if (ch->ch.chunk_type & 0x40) {
					/* Add an error report to the queue */
3117					struct mbuf *merr;
3118					struct sctp_paramhdr *phd;
3119
3120					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3121					if (merr) {
3122						phd = mtod(merr, struct sctp_paramhdr *);
3123						/*
3124						 * We cheat and use param
3125						 * type since we did not
3126						 * bother to define a error
3127						 * cause struct. They are
3128						 * the same basic format
3129						 * with different names.
3130						 */
3131						phd->param_type =
3132						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
3133						phd->param_length =
3134						    htons(chk_length + sizeof(*phd));
3135						SCTP_BUF_LEN(merr) = sizeof(*phd);
3136						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3137						    SCTP_SIZE32(chk_length),
3138						    M_DONTWAIT);
3139						if (SCTP_BUF_NEXT(merr)) {
3140							sctp_queue_op_err(stcb, merr);
3141						} else {
3142							sctp_m_freem(merr);
3143						}
3144					}
3145				}
3146				if ((ch->ch.chunk_type & 0x80) == 0) {
3147					/* discard the rest of this packet */
3148					stop_proc = 1;
3149				}	/* else skip this bad chunk and
3150					 * continue... */
3151				break;
3152			};	/* switch of chunk type */
3153		}
3154		*offset += SCTP_SIZE32(chk_length);
3155		if ((*offset >= length) || stop_proc) {
3156			/* no more data left in the mbuf chain */
3157			stop_proc = 1;
3158			continue;
3159		}
3160		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3161		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3162		if (ch == NULL) {
3163			*offset = length;
3164			stop_proc = 1;
3165			break;
3166
3167		}
3168	}			/* while */
3169	if (break_flag) {
3170		/*
3171		 * we need to report rwnd overrun drops.
3172		 */
3173		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3174	}
3175	if (num_chunks) {
3176		/*
3177		 * Did we get data, if so update the time for auto-close and
3178		 * give peer credit for being alive.
3179		 */
3180		SCTP_STAT_INCR(sctps_recvpktwithdata);
3181		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3182			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3183			    stcb->asoc.overall_error_count,
3184			    0,
3185			    SCTP_FROM_SCTP_INDATA,
3186			    __LINE__);
3187		}
3188		stcb->asoc.overall_error_count = 0;
3189		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3190	}
3191	/* now service all of the reassm queue if needed */
3192	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3193		sctp_service_queues(stcb, asoc);
3194
3195	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3196		/* Assure that we ack right away */
3197		stcb->asoc.send_sack = 1;
3198	}
3199	/* Start a sack timer or QUEUE a SACK for sending */
3200	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3201	    (stcb->asoc.mapping_array[0] != 0xff)) {
3202		if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3203		    (stcb->asoc.delayed_ack == 0) ||
3204		    (stcb->asoc.numduptsns) ||
3205		    (stcb->asoc.send_sack == 1)) {
3206			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3207				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3208			}
3209			/*
3210			 * EY if nr_sacks used then send an nr-sack , a sack
3211			 * otherwise
3212			 */
3213			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3214				sctp_send_nr_sack(stcb);
3215			else
3216				sctp_send_sack(stcb);
3217		} else {
3218			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3219				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3220				    stcb->sctp_ep, stcb, NULL);
3221			}
3222		}
3223	} else {
3224		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
3225	}
3226	if (abort_flag)
3227		return (2);
3228
3229	return (0);
3230}
3231
3232static void
3233sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3234    struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3235    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3236    int num_seg, int *ecn_seg_sums)
3237{
3238	/************************************************/
3239	/* process fragments and update sendqueue        */
3240	/************************************************/
3241	struct sctp_sack *sack;
3242	struct sctp_gap_ack_block *frag, block;
3243	struct sctp_tmit_chunk *tp1;
3244	int i, j;
3245	unsigned int theTSN;
3246	int num_frs = 0;
3247
3248	uint16_t frag_strt, frag_end, primary_flag_set;
3249	u_long last_frag_high;
3250
3251	/*
3252	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3253	 */
3254	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3255		primary_flag_set = 1;
3256	} else {
3257		primary_flag_set = 0;
3258	}
3259	sack = &ch->sack;
3260
3261	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3262	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3263	*offset += sizeof(block);
3264	if (frag == NULL) {
3265		return;
3266	}
3267	tp1 = NULL;
3268	last_frag_high = 0;
3269	for (i = 0; i < num_seg; i++) {
3270		frag_strt = ntohs(frag->start);
3271		frag_end = ntohs(frag->end);
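		/*
		 * Gap Ack Block boundaries are offsets relative to the
		 * Cumulative TSN Ack (last_tsn): this block acks TSNs
		 * (last_tsn + frag_strt) through (last_tsn + frag_end),
		 * inclusive. E.g. with a cum-ack of 1000, a block of
		 * 2..3 covers TSNs 1002 and 1003.
		 */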
3272		/* some sanity checks on the fragment offsets */
3273		if (frag_strt > frag_end) {
3274			/* this one is malformed, skip */
3275			frag++;
3276			continue;
3277		}
3278		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3279		    MAX_TSN))
3280			*biggest_tsn_acked = frag_end + last_tsn;
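		/*
		 * Note that TSN comparisons here use serial-number
		 * arithmetic (compare_with_wrap), so a numerically small
		 * TSN just past a 32-bit wrap compares "greater than" a
		 * TSN near 0xffffffff.
		 */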
3281
		/* mark acked datagrams and find the highest TSN being acked */
3283		if (tp1 == NULL) {
3284			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3285
3286			/* save the locations of the last frags */
3287			last_frag_high = frag_end + last_tsn;
3288		} else {
3289			/*
			 * now let's see if we need to reset the queue due
			 * to an out-of-order SACK fragment
3292			 */
3293			if (compare_with_wrap(frag_strt + last_tsn,
3294			    last_frag_high, MAX_TSN)) {
3295				/*
3296				 * if the new frag starts after the last TSN
3297				 * frag covered, we are ok and this one is
3298				 * beyond the last one
3299				 */
3300				;
3301			} else {
3302				/*
				 * ok, they have reset us, so we need to
				 * reset the queue; this will cause extra
				 * hunting, but hey, they chose the
				 * performance hit when they failed to
				 * order their gaps
3308				 */
3309				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3310			}
3311			last_frag_high = frag_end + last_tsn;
3312		}
3313		for (j = frag_strt; j <= frag_end; j++) {
3314			theTSN = j + last_tsn;
3315			while (tp1) {
3316				if (tp1->rec.data.doing_fast_retransmit)
3317					num_frs++;
3318
3319				/*
3320				 * CMT: CUCv2 algorithm. For each TSN being
3321				 * processed from the sent queue, track the
3322				 * next expected pseudo-cumack, or
3323				 * rtx_pseudo_cumack, if required. Separate
3324				 * cumack trackers for first transmissions,
3325				 * and retransmissions.
3326				 */
3327				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3328				    (tp1->snd_count == 1)) {
3329					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3330					tp1->whoTo->find_pseudo_cumack = 0;
3331				}
3332				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3333				    (tp1->snd_count > 1)) {
3334					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3335					tp1->whoTo->find_rtx_pseudo_cumack = 0;
3336				}
3337				if (tp1->rec.data.TSN_seq == theTSN) {
3338					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3339						/*
3340						 * must be held until
3341						 * cum-ack passes
3342						 */
3343						/*
3344						 * ECN Nonce: Add the nonce
3345						 * value to the sender's
3346						 * nonce sum
3347						 */
3348						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3349							/*-
3350							 * If it is less than RESEND, it is
							 * now no longer in flight.
3352							 * Higher values may already be set
3353							 * via previous Gap Ack Blocks...
3354							 * i.e. ACKED or RESEND.
3355							 */
3356							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3357							    *biggest_newly_acked_tsn, MAX_TSN)) {
3358								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3359							}
3360							/*
3361							 * CMT: SFR algo
3362							 * (and HTNA) - set
3363							 * saw_newack to 1
3364							 * for dest being
3365							 * newly acked.
3366							 * update
							 * this_sack_highest_newack
							 * if appropriate.
3370							 */
3371							if (tp1->rec.data.chunk_was_revoked == 0)
3372								tp1->whoTo->saw_newack = 1;
3373
3374							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3375							    tp1->whoTo->this_sack_highest_newack,
3376							    MAX_TSN)) {
3377								tp1->whoTo->this_sack_highest_newack =
3378								    tp1->rec.data.TSN_seq;
3379							}
3380							/*
3381							 * CMT DAC algo:
3382							 * also update
							 * this_sack_lowest_newack
3385							 */
3386							if (*this_sack_lowest_newack == 0) {
3387								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3388									sctp_log_sack(*this_sack_lowest_newack,
3389									    last_tsn,
3390									    tp1->rec.data.TSN_seq,
3391									    0,
3392									    0,
3393									    SCTP_LOG_TSN_ACKED);
3394								}
3395								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3396							}
							/*
							 * CMT: CUCv2 algorithm.
							 * If the
							 * (rtx-)pseudo-cumack for
							 * the corresponding dest
							 * is being acked, then we
							 * have a new
							 * (rtx-)pseudo-cumack.
							 * Set
							 * new_(rtx_)pseudo_cumack
							 * to TRUE so that the
							 * cwnd for this dest can
							 * be updated. Also
							 * trigger a search for
							 * the next expected
							 * (rtx-)pseudo-cumack.
							 * Separate pseudo_cumack
							 * trackers for first
							 * transmissions and
							 * retransmissions.
							 */
3423							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3424								if (tp1->rec.data.chunk_was_revoked == 0) {
3425									tp1->whoTo->new_pseudo_cumack = 1;
3426								}
3427								tp1->whoTo->find_pseudo_cumack = 1;
3428							}
3429							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3430								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3431							}
3432							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3433								if (tp1->rec.data.chunk_was_revoked == 0) {
3434									tp1->whoTo->new_pseudo_cumack = 1;
3435								}
3436								tp1->whoTo->find_rtx_pseudo_cumack = 1;
3437							}
3438							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3439								sctp_log_sack(*biggest_newly_acked_tsn,
3440								    last_tsn,
3441								    tp1->rec.data.TSN_seq,
3442								    frag_strt,
3443								    frag_end,
3444								    SCTP_LOG_TSN_ACKED);
3445							}
3446							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3447								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3448								    tp1->whoTo->flight_size,
3449								    tp1->book_size,
3450								    (uintptr_t) tp1->whoTo,
3451								    tp1->rec.data.TSN_seq);
3452							}
3453							sctp_flight_size_decrease(tp1);
3454							sctp_total_flight_decrease(stcb, tp1);
3455
3456							tp1->whoTo->net_ack += tp1->send_size;
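							/*
							 * Karn's algorithm:
							 * only a chunk sent
							 * exactly once gives
							 * an unambiguous RTT
							 * sample, so the RTO
							 * update below is
							 * restricted to first
							 * transmissions.
							 */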
3457							if (tp1->snd_count < 2) {
								/*
								 * True
								 * non-retransmitted
								 * chunk
								 */
3463								tp1->whoTo->net_ack2 += tp1->send_size;
3464
3465								/*
3466								 * update RTO
3467								 * too ? */
3468								if (tp1->do_rtt) {
3469									tp1->whoTo->RTO =
3470									    sctp_calculate_rto(stcb,
3471									    asoc,
3472									    tp1->whoTo,
3473									    &tp1->sent_rcv_time,
3474									    sctp_align_safe_nocopy);
3475									tp1->do_rtt = 0;
3476								}
3477							}
3478						}
3479						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3480							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3481							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3482							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3483							    asoc->this_sack_highest_gap,
3484							    MAX_TSN)) {
3485								asoc->this_sack_highest_gap =
3486								    tp1->rec.data.TSN_seq;
3487							}
3488							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3489								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3490#ifdef SCTP_AUDITING_ENABLED
3491								sctp_audit_log(0xB2,
3492								    (asoc->sent_queue_retran_cnt & 0x000000ff));
3493#endif
3494							}
3495						}
3496						/*
3497						 * All chunks NOT UNSENT
3498						 * fall through here and are
3499						 * marked
3500						 */
3501						tp1->sent = SCTP_DATAGRAM_MARKED;
3502						if (tp1->rec.data.chunk_was_revoked) {
3503							/* deflate the cwnd */
3504							tp1->whoTo->cwnd -= tp1->book_size;
3505							tp1->rec.data.chunk_was_revoked = 0;
3506						}
3507					}
3508					break;
3509				}	/* if (tp1->TSN_seq == theTSN) */
3510				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3511				    MAX_TSN))
3512					break;
3513
3514				tp1 = TAILQ_NEXT(tp1, sctp_next);
3515			}	/* end while (tp1) */
		}		/* end for (j = frag_strt) */
3517		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3518		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3519		*offset += sizeof(block);
3520		if (frag == NULL) {
3521			break;
3522		}
3523	}
3524	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3525		if (num_frs)
3526			sctp_log_fr(*biggest_tsn_acked,
3527			    *biggest_newly_acked_tsn,
3528			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3529	}
3530}
3531
3532static void
3533sctp_check_for_revoked(struct sctp_tcb *stcb,
3534    struct sctp_association *asoc, uint32_t cumack,
3535    u_long biggest_tsn_acked)
3536{
3537	struct sctp_tmit_chunk *tp1;
3538	int tot_revoked = 0;
3539
3540	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3541	while (tp1) {
3542		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3543		    MAX_TSN)) {
3544			/*
			 * ok this guy is either ACKED or MARKED. If it is
3546			 * ACKED it has been previously acked but not this
3547			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3548			 * again.
3549			 */
3550			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3551			    MAX_TSN))
3552				break;
3555			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3556				/* it has been revoked */
3557				tp1->sent = SCTP_DATAGRAM_SENT;
3558				tp1->rec.data.chunk_was_revoked = 1;
3559				/*
3560				 * We must add this stuff back in to assure
3561				 * timers and such get started.
3562				 */
3563				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3564					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3565					    tp1->whoTo->flight_size,
3566					    tp1->book_size,
3567					    (uintptr_t) tp1->whoTo,
3568					    tp1->rec.data.TSN_seq);
3569				}
3570				sctp_flight_size_increase(tp1);
3571				sctp_total_flight_increase(stcb, tp1);
3572				/*
3573				 * We inflate the cwnd to compensate for our
3574				 * artificial inflation of the flight_size.
3575				 */
3576				tp1->whoTo->cwnd += tp1->book_size;
3577				tot_revoked++;
3578				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3579					sctp_log_sack(asoc->last_acked_seq,
3580					    cumack,
3581					    tp1->rec.data.TSN_seq,
3582					    0,
3583					    0,
3584					    SCTP_LOG_TSN_REVOKED);
3585				}
3586			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3587				/* it has been re-acked in this SACK */
3588				tp1->sent = SCTP_DATAGRAM_ACKED;
3589			}
3590		}
3591		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3592			break;
3593		tp1 = TAILQ_NEXT(tp1, sctp_next);
3594	}
3595	if (tot_revoked > 0) {
3596		/*
3597		 * Setup the ecn nonce re-sync point. We do this since once
3598		 * data is revoked we begin to retransmit things, which do
3599		 * NOT have the ECN bits set. This means we are now out of
3600		 * sync and must wait until we get back in sync with the
3601		 * peer to check ECN bits.
3602		 */
3603		tp1 = TAILQ_FIRST(&asoc->send_queue);
3604		if (tp1 == NULL) {
3605			asoc->nonce_resync_tsn = asoc->sending_seq;
3606		} else {
3607			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3608		}
3609		asoc->nonce_wait_for_ecne = 0;
3610		asoc->nonce_sum_check = 0;
3611	}
3612}
3613
3614
3615static void
3616sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3617    u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3618{
3619	struct sctp_tmit_chunk *tp1;
3620	int strike_flag = 0;
3621	struct timeval now;
3622	int tot_retrans = 0;
3623	uint32_t sending_seq;
3624	struct sctp_nets *net;
3625	int num_dests_sacked = 0;
3626
3627	/*
3628	 * select the sending_seq, this is either the next thing ready to be
3629	 * sent but not transmitted, OR, the next seq we assign.
3630	 */
3631	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3632	if (tp1 == NULL) {
3633		sending_seq = asoc->sending_seq;
3634	} else {
3635		sending_seq = tp1->rec.data.TSN_seq;
3636	}
3637
3638	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3639	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3640		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3641			if (net->saw_newack)
3642				num_dests_sacked++;
3643		}
3644	}
3645	if (stcb->asoc.peer_supports_prsctp) {
3646		(void)SCTP_GETTIME_TIMEVAL(&now);
3647	}
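	/*
	 * Walk the sent queue and "strike" each TSN that this SACK shows
	 * as missing: tp1->sent counts the misses, and once it climbs to
	 * SCTP_DATAGRAM_RESEND below, the chunk is marked for fast
	 * retransmit.
	 */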
3648	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3649	while (tp1) {
3650		strike_flag = 0;
3651		if (tp1->no_fr_allowed) {
3652			/* this one had a timeout or something */
3653			tp1 = TAILQ_NEXT(tp1, sctp_next);
3654			continue;
3655		}
3656		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3657			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3658				sctp_log_fr(biggest_tsn_newly_acked,
3659				    tp1->rec.data.TSN_seq,
3660				    tp1->sent,
3661				    SCTP_FR_LOG_CHECK_STRIKE);
3662		}
3663		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3664		    MAX_TSN) ||
3665		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3666			/* done */
3667			break;
3668		}
3669		if (stcb->asoc.peer_supports_prsctp) {
3670			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3671				/* Is it expired? */
				/*
				 * TODO: sctp_constants.h needs alternative
				 * time macros when _KERNEL is undefined.
				 */
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3679					/* Yes so drop it */
3680					if (tp1->data != NULL) {
3681						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3682						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3683						    SCTP_SO_NOT_LOCKED);
3684					}
3685					tp1 = TAILQ_NEXT(tp1, sctp_next);
3686					continue;
3687				}
3688			}
3689			if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3690				/* Has it been retransmitted tv_sec times? */
3691				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3692					/* Yes, so drop it */
3693					if (tp1->data != NULL) {
3694						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3695						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3696						    SCTP_SO_NOT_LOCKED);
3697					}
3698					tp1 = TAILQ_NEXT(tp1, sctp_next);
3699					continue;
3700				}
3701			}
3702		}
3703		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3704		    asoc->this_sack_highest_gap, MAX_TSN)) {
3705			/* we are beyond the tsn in the sack  */
3706			break;
3707		}
3708		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3709			/* either a RESEND, ACKED, or MARKED */
3710			/* skip */
3711			tp1 = TAILQ_NEXT(tp1, sctp_next);
3712			continue;
3713		}
3714		/*
3715		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3716		 */
3717		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3718			/*
			 * No new acks were received for data sent to this
3720			 * dest. Therefore, according to the SFR algo for
3721			 * CMT, no data sent to this dest can be marked for
3722			 * FR using this SACK.
3723			 */
3724			tp1 = TAILQ_NEXT(tp1, sctp_next);
3725			continue;
3726		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3727		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3728			/*
			 * CMT: New acks were received for data sent to
3730			 * this dest. But no new acks were seen for data
3731			 * sent after tp1. Therefore, according to the SFR
3732			 * algo for CMT, tp1 cannot be marked for FR using
3733			 * this SACK. This step covers part of the DAC algo
3734			 * and the HTNA algo as well.
3735			 */
3736			tp1 = TAILQ_NEXT(tp1, sctp_next);
3737			continue;
3738		}
3739		/*
		 * Here we check to see if we have already done a FR
3741		 * and if so we see if the biggest TSN we saw in the sack is
3742		 * smaller than the recovery point. If so we don't strike
3743		 * the tsn... otherwise we CAN strike the TSN.
3744		 */
3745		/*
3746		 * @@@ JRI: Check for CMT if (accum_moved &&
3747		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3748		 * 0)) {
3749		 */
3750		if (accum_moved && asoc->fast_retran_loss_recovery) {
3751			/*
3752			 * Strike the TSN if in fast-recovery and cum-ack
3753			 * moved.
3754			 */
3755			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3756				sctp_log_fr(biggest_tsn_newly_acked,
3757				    tp1->rec.data.TSN_seq,
3758				    tp1->sent,
3759				    SCTP_FR_LOG_STRIKE_CHUNK);
3760			}
3761			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3762				tp1->sent++;
3763			}
3764			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3765				/*
				 * CMT DAC algorithm: If the SACK flag is set
				 * to 0, the lowest_newack test will not pass,
				 * because it would have been set to the
				 * cumack earlier. If the chunk is not already
				 * marked for retransmission, the SACK is not
				 * a mixed SACK, and tp1 does not lie between
				 * two sacked TSNs, then strike it one more
				 * time. NOTE that we mark one additional time
				 * since the SACK DAC flag indicates that two
				 * packets have been received after this
				 * missing TSN.
3776				 */
3777				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3778				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3779					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3780						sctp_log_fr(16 + num_dests_sacked,
3781						    tp1->rec.data.TSN_seq,
3782						    tp1->sent,
3783						    SCTP_FR_LOG_STRIKE_CHUNK);
3784					}
3785					tp1->sent++;
3786				}
3787			}
3788		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3789			/*
3790			 * For those that have done a FR we must take
			 * special consideration if we strike, i.e. the
3792			 * biggest_newly_acked must be higher than the
3793			 * sending_seq at the time we did the FR.
3794			 */
3795			if (
3796#ifdef SCTP_FR_TO_ALTERNATE
3797			/*
3798			 * If FR's go to new networks, then we must only do
3799			 * this for singly homed asoc's. However if the FR's
			 * go to the same network (Armando's work) then it's
3801			 * ok to FR multiple times.
3802			 */
3803			    (asoc->numnets < 2)
3804#else
3805			    (1)
3806#endif
3807			    ) {
3808
3809				if ((compare_with_wrap(biggest_tsn_newly_acked,
3810				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3811				    (biggest_tsn_newly_acked ==
3812				    tp1->rec.data.fast_retran_tsn)) {
3813					/*
3814					 * Strike the TSN, since this ack is
3815					 * beyond where things were when we
3816					 * did a FR.
3817					 */
3818					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3819						sctp_log_fr(biggest_tsn_newly_acked,
3820						    tp1->rec.data.TSN_seq,
3821						    tp1->sent,
3822						    SCTP_FR_LOG_STRIKE_CHUNK);
3823					}
3824					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3825						tp1->sent++;
3826					}
3827					strike_flag = 1;
3828					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3829						/*
						 * CMT DAC algorithm: If the
						 * SACK flag is set to 0,
						 * the lowest_newack test
						 * will not pass, because it
						 * would have been set to
						 * the cumack earlier. If
						 * the chunk is not already
						 * marked for rtx, the SACK
						 * is not a mixed SACK, and
						 * tp1 does not lie between
						 * two sacked TSNs, then
						 * strike it one more time.
						 * NOTE that we mark one
						 * additional time since the
						 * SACK DAC flag indicates
						 * that two packets have
						 * been received after this
						 * missing TSN.
3847						 */
3848						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3849						    (num_dests_sacked == 1) &&
3850						    compare_with_wrap(this_sack_lowest_newack,
3851						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3852							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3853								sctp_log_fr(32 + num_dests_sacked,
3854								    tp1->rec.data.TSN_seq,
3855								    tp1->sent,
3856								    SCTP_FR_LOG_STRIKE_CHUNK);
3857							}
3858							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3859								tp1->sent++;
3860							}
3861						}
3862					}
3863				}
3864			}
3865			/*
3866			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3867			 * algo covers HTNA.
3868			 */
3869		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3870		    biggest_tsn_newly_acked, MAX_TSN)) {
3871			/*
			 * We don't strike these: this is the HTNA
			 * algorithm, i.e. we don't strike if our TSN is
3874			 * larger than the Highest TSN Newly Acked.
3875			 */
3876			;
3877		} else {
3878			/* Strike the TSN */
3879			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3880				sctp_log_fr(biggest_tsn_newly_acked,
3881				    tp1->rec.data.TSN_seq,
3882				    tp1->sent,
3883				    SCTP_FR_LOG_STRIKE_CHUNK);
3884			}
3885			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3886				tp1->sent++;
3887			}
3888			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3889				/*
				 * CMT DAC algorithm: If the SACK flag is set
				 * to 0, the lowest_newack test will not pass,
				 * because it would have been set to the
				 * cumack earlier. If the chunk is not already
				 * marked for retransmission, the SACK is not
				 * a mixed SACK, and tp1 does not lie between
				 * two sacked TSNs, then strike it one more
				 * time. NOTE that we mark one additional time
				 * since the SACK DAC flag indicates that two
				 * packets have been received after this
				 * missing TSN.
3900				 */
3901				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3902				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3903					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3904						sctp_log_fr(48 + num_dests_sacked,
3905						    tp1->rec.data.TSN_seq,
3906						    tp1->sent,
3907						    SCTP_FR_LOG_STRIKE_CHUNK);
3908					}
3909					tp1->sent++;
3910				}
3911			}
3912		}
3913		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3914			/* Increment the count to resend */
3915			struct sctp_nets *alt;
3916
3917			/* printf("OK, we are now ready to FR this guy\n"); */
3918			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3919				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3920				    0, SCTP_FR_MARKED);
3921			}
3922			if (strike_flag) {
3923				/* This is a subsequent FR */
3924				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3925			}
3926			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3927			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3928				/*
3929				 * CMT: Using RTX_SSTHRESH policy for CMT.
3930				 * If CMT is being used, then pick dest with
3931				 * largest ssthresh for any retransmission.
3932				 */
3933				tp1->no_fr_allowed = 1;
3934				alt = tp1->whoTo;
3935				/* sa_ignore NO_NULL_CHK */
3936				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3937					/*
3938					 * JRS 5/18/07 - If CMT PF is on,
3939					 * use the PF version of
3940					 * find_alt_net()
3941					 */
3942					alt = sctp_find_alternate_net(stcb, alt, 2);
3943				} else {
3944					/*
3945					 * JRS 5/18/07 - If only CMT is on,
3946					 * use the CMT version of
3947					 * find_alt_net()
3948					 */
3949					/* sa_ignore NO_NULL_CHK */
3950					alt = sctp_find_alternate_net(stcb, alt, 1);
3951				}
3952				if (alt == NULL) {
3953					alt = tp1->whoTo;
3954				}
3955				/*
3956				 * CUCv2: If a different dest is picked for
3957				 * the retransmission, then new
3958				 * (rtx-)pseudo_cumack needs to be tracked
3959				 * for orig dest. Let CUCv2 track new (rtx-)
3960				 * pseudo-cumack always.
3961				 */
3962				if (tp1->whoTo) {
3963					tp1->whoTo->find_pseudo_cumack = 1;
3964					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3965				}
3966			} else {/* CMT is OFF */
3967
3968#ifdef SCTP_FR_TO_ALTERNATE
3969				/* Can we find an alternate? */
3970				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3971#else
3972				/*
3973				 * default behavior is to NOT retransmit
3974				 * FR's to an alternate. Armando Caro's
3975				 * paper details why.
3976				 */
3977				alt = tp1->whoTo;
3978#endif
3979			}
3980
3981			tp1->rec.data.doing_fast_retransmit = 1;
3982			tot_retrans++;
3983			/* mark the sending seq for possible subsequent FR's */
3984			/*
3985			 * printf("Marking TSN for FR new value %x\n",
3986			 * (uint32_t)tpi->rec.data.TSN_seq);
3987			 */
3988			if (TAILQ_EMPTY(&asoc->send_queue)) {
3989				/*
				 * If the send queue is empty then this is
				 * the next sequence number that will be
				 * assigned, so we subtract one from it to
				 * get the one we last sent.
3994				 */
3995				tp1->rec.data.fast_retran_tsn = sending_seq;
3996			} else {
3997				/*
3998				 * If there are chunks on the send queue
3999				 * (unsent data that has made it from the
				 * stream queues but not out the door), we
4001				 * take the first one (which will have the
4002				 * lowest TSN) and subtract one to get the
4003				 * one we last sent.
4004				 */
4005				struct sctp_tmit_chunk *ttt;
4006
4007				ttt = TAILQ_FIRST(&asoc->send_queue);
4008				tp1->rec.data.fast_retran_tsn =
4009				    ttt->rec.data.TSN_seq;
4010			}
4011
4012			if (tp1->do_rtt) {
4013				/*
				 * this guy had an RTO calculation pending on
4015				 * it, cancel it
4016				 */
4017				tp1->do_rtt = 0;
4018			}
4019			/* fix counts and things */
4020			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4021				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
4022				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
4023				    tp1->book_size,
4024				    (uintptr_t) tp1->whoTo,
4025				    tp1->rec.data.TSN_seq);
4026			}
4027			if (tp1->whoTo) {
4028				tp1->whoTo->net_ack++;
4029				sctp_flight_size_decrease(tp1);
4030			}
4031			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4032				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
4033				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
4034			}
4035			/* add back to the rwnd */
4036			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
4037
4038			/* remove from the total flight */
4039			sctp_total_flight_decrease(stcb, tp1);
4040			if (alt != tp1->whoTo) {
4041				/* yes, there is an alternate. */
4042				sctp_free_remote_addr(tp1->whoTo);
4043				/* sa_ignore FREED_MEMORY */
4044				tp1->whoTo = alt;
4045				atomic_add_int(&alt->ref_count, 1);
4046			}
4047		}
4048		tp1 = TAILQ_NEXT(tp1, sctp_next);
4049	}			/* while (tp1) */
4050
4051	if (tot_retrans > 0) {
4052		/*
		 * Set up the ECN nonce re-sync point. We do this since once
		 * we fast-retransmit something we introduce a Karn's-rule
		 * scenario and won't know the totals for the ECN bits.
4056		 */
4057		asoc->nonce_resync_tsn = sending_seq;
4058		asoc->nonce_wait_for_ecne = 0;
4059		asoc->nonce_sum_check = 0;
4060	}
4061}
4062
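/*
 * PR-SCTP: walk the sent queue from the front and advance the
 * advanced_peer_ack_point over chunks that have been abandoned
 * (SCTP_FORWARD_TSN_SKIP). Returns the chunk sitting at the new ack
 * point, if any; the caller uses it to decide whether a FORWARD-TSN
 * must be sent and to keep a retransmission timer running.
 */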
4063struct sctp_tmit_chunk *
4064sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4065    struct sctp_association *asoc)
4066{
4067	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
4068	struct timeval now;
4069	int now_filled = 0;
4070
4071	if (asoc->peer_supports_prsctp == 0) {
4072		return (NULL);
4073	}
4074	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4075	while (tp1) {
4076		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4077		    tp1->sent != SCTP_DATAGRAM_RESEND) {
4078			/* no chance to advance, out of here */
4079			break;
4080		}
4081		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4082			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4083				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4084				    asoc->advanced_peer_ack_point,
4085				    tp1->rec.data.TSN_seq, 0, 0);
4086			}
4087		}
4088		if (!PR_SCTP_ENABLED(tp1->flags)) {
4089			/*
			 * We can't fwd-tsn past any that are reliable, i.e.
			 * must be retransmitted until the asoc fails.
4092			 */
4093			break;
4094		}
4095		if (!now_filled) {
4096			(void)SCTP_GETTIME_TIMEVAL(&now);
4097			now_filled = 1;
4098		}
4099		tp2 = TAILQ_NEXT(tp1, sctp_next);
4100		/*
		 * Now we have a chunk which is marked for another
		 * retransmission to a PR-stream, but may have run out of its
		 * chances already, OR has been marked to skip now. Can we
		 * skip it if it is a resend?
4105		 */
4106		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4107		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4108			/*
4109			 * Now is this one marked for resend and its time is
4110			 * now up?
4111			 */
4112			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4113				/* Yes so drop it */
4114				if (tp1->data) {
4115					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
4116					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4117					    SCTP_SO_NOT_LOCKED);
4118				}
4119			} else {
4120				/*
				 * No, we are done when we hit one marked for
				 * resend whose time has not expired.
4123				 */
4124				break;
4125			}
4126		}
4127		/*
		 * Ok, now if this chunk is marked to be dropped we can clean
		 * up the chunk, advance our peer ack point, and we can check
4130		 * the next chunk.
4131		 */
4132		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
			/* the advanced PeerAckPoint moves forward */
4134			if (compare_with_wrap(tp1->rec.data.TSN_seq,
4135			    asoc->advanced_peer_ack_point,
4136			    MAX_TSN)) {
4137
4138				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4139				a_adv = tp1;
4140			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
4141				/* No update but we do save the chk */
4142				a_adv = tp1;
4143			}
4144		} else {
4145			/*
4146			 * If it is still in RESEND we can advance no
4147			 * further
4148			 */
4149			break;
4150		}
4151		/*
4152		 * If we hit here we just dumped tp1, move to next tsn on
4153		 * sent queue.
4154		 */
4155		tp1 = tp2;
4156	}
4157	return (a_adv);
4158}
4159
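/*
 * Sanity-check the flight-size accounting against the sent queue:
 * when the cum-ack has emptied the window, every chunk still queued
 * should be RESEND, ACKED, or above. Returns non-zero (or panics
 * under INVARIANTS) if something is still counted as in flight, so
 * the caller can rebuild the counters.
 */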
4160static int
4161sctp_fs_audit(struct sctp_association *asoc)
4162{
4163	struct sctp_tmit_chunk *chk;
4164	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4165	int entry_flight, entry_cnt, ret;
4166
4167	entry_flight = asoc->total_flight;
4168	entry_cnt = asoc->total_flight_count;
4169	ret = 0;
4170
4171	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
4172		return (0);
4173
4174	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4175		if (chk->sent < SCTP_DATAGRAM_RESEND) {
4176			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
4177			    chk->rec.data.TSN_seq,
4178			    chk->send_size,
4179			    chk->snd_count
4180			    );
4181			inflight++;
4182		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4183			resend++;
4184		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4185			inbetween++;
4186		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4187			above++;
4188		} else {
4189			acked++;
4190		}
4191	}
4192
4193	if ((inflight > 0) || (inbetween > 0)) {
4194#ifdef INVARIANTS
4195		panic("Flight size-express incorrect? \n");
4196#else
4197		printf("asoc->total_flight:%d cnt:%d\n",
4198		    entry_flight, entry_cnt);
4199
4200		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
4201		    inflight, inbetween, resend, above, acked);
4202		ret = 1;
4203#endif
4204	}
4205	return (ret);
4206}
4207
4208
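/*
 * A window probe was sent while the peer advertised a zero rwnd.
 * Now that the window has reopened, take the probe chunk back out
 * of the flight-size accounting and mark it for retransmission;
 * TSNs that were already acked (or whose data is gone) stay put.
 */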
4209static void
4210sctp_window_probe_recovery(struct sctp_tcb *stcb,
4211    struct sctp_association *asoc,
4212    struct sctp_nets *net,
4213    struct sctp_tmit_chunk *tp1)
4214{
4215	tp1->window_probe = 0;
4216	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
		/*
		 * TSNs already acked (or with no data) are skipped; we
		 * do NOT move them back.
		 */
4218		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
4219		    tp1->whoTo->flight_size,
4220		    tp1->book_size,
4221		    (uintptr_t) tp1->whoTo,
4222		    tp1->rec.data.TSN_seq);
4223		return;
4224	}
4225	/* First setup this by shrinking flight */
4226	sctp_flight_size_decrease(tp1);
4227	sctp_total_flight_decrease(stcb, tp1);
4228	/* Now mark for resend */
4229	tp1->sent = SCTP_DATAGRAM_RESEND;
4230	asoc->sent_queue_retran_cnt++;
4231	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4232		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4233		    tp1->whoTo->flight_size,
4234		    tp1->book_size,
4235		    (uintptr_t) tp1->whoTo,
4236		    tp1->rec.data.TSN_seq);
4237	}
4238}
4239
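/*
 * Express SACK handling: the fast path for a SACK that carries no
 * gap-ack blocks and no duplicate TSNs, so only the cumulative TSN
 * and the advertised rwnd need to be processed.
 */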
4240void
4241sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4242    uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4243{
4244	struct sctp_nets *net;
4245	struct sctp_association *asoc;
4246	struct sctp_tmit_chunk *tp1, *tp2;
4247	uint32_t old_rwnd;
4248	int win_probe_recovery = 0;
4249	int win_probe_recovered = 0;
4250	int j, done_once = 0;
4251
4252	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4253		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4254		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4255	}
4256	SCTP_TCB_LOCK_ASSERT(stcb);
4257#ifdef SCTP_ASOCLOG_OF_TSNS
4258	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4259	stcb->asoc.cumack_log_at++;
4260	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4261		stcb->asoc.cumack_log_at = 0;
4262	}
4263#endif
4264	asoc = &stcb->asoc;
4265	old_rwnd = asoc->peers_rwnd;
4266	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4267		/* old ack */
4268		return;
4269	} else if (asoc->last_acked_seq == cumack) {
4270		/* Window update sack */
4271		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4272		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4273		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4274			/* SWS sender side engages */
4275			asoc->peers_rwnd = 0;
4276		}
4277		if (asoc->peers_rwnd > old_rwnd) {
4278			goto again;
4279		}
4280		return;
4281	}
4282	/* First setup for CC stuff */
4283	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4284		net->prev_cwnd = net->cwnd;
4285		net->net_ack = 0;
4286		net->net_ack2 = 0;
4287
4288		/*
4289		 * CMT: Reset CUC and Fast recovery algo variables before
4290		 * SACK processing
4291		 */
4292		net->new_pseudo_cumack = 0;
4293		net->will_exit_fast_recovery = 0;
4294	}
4295	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4296		uint32_t send_s;
4297
4298		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4299			tp1 = TAILQ_LAST(&asoc->sent_queue,
4300			    sctpchunk_listhead);
4301			send_s = tp1->rec.data.TSN_seq + 1;
4302		} else {
4303			send_s = asoc->sending_seq;
4304		}
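		/*
		 * send_s is now the first TSN we have never sent; a
		 * cum-ack at or beyond it acks data that does not exist,
		 * which is a protocol violation.
		 */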
4305		if ((cumack == send_s) ||
4306		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
4307#ifndef INVARIANTS
4308			struct mbuf *oper;
4309
4310#endif
4311#ifdef INVARIANTS
4312			panic("Impossible sack 1");
4313#else
4314			*abort_now = 1;
4315			/* XXX */
4316			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4317			    0, M_DONTWAIT, 1, MT_DATA);
4318			if (oper) {
4319				struct sctp_paramhdr *ph;
4320				uint32_t *ippp;
4321
4322				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4323				    sizeof(uint32_t);
4324				ph = mtod(oper, struct sctp_paramhdr *);
4325				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4326				ph->param_length = htons(SCTP_BUF_LEN(oper));
4327				ippp = (uint32_t *) (ph + 1);
4328				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4329			}
4330			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4331			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4332			return;
4333#endif
4334		}
4335	}
4336	asoc->this_sack_highest_gap = cumack;
4337	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4338		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4339		    stcb->asoc.overall_error_count,
4340		    0,
4341		    SCTP_FROM_SCTP_INDATA,
4342		    __LINE__);
4343	}
4344	stcb->asoc.overall_error_count = 0;
4345	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4346		/* process the new consecutive TSN first */
4347		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4348		while (tp1) {
4349			tp2 = TAILQ_NEXT(tp1, sctp_next);
4350			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4351			    MAX_TSN) ||
4352			    cumack == tp1->rec.data.TSN_seq) {
4353				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4354					printf("Warning, an unsent is now acked?\n");
4355				}
4356				/*
4357				 * ECN Nonce: Add the nonce to the sender's
4358				 * nonce sum
4359				 */
4360				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4361				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4362					/*
4363					 * If it is less than ACKED, it is
					 * now no longer in flight. Higher
4365					 * values may occur during marking
4366					 */
4367					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4368						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4369							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4370							    tp1->whoTo->flight_size,
4371							    tp1->book_size,
4372							    (uintptr_t) tp1->whoTo,
4373							    tp1->rec.data.TSN_seq);
4374						}
4375						sctp_flight_size_decrease(tp1);
4376						/* sa_ignore NO_NULL_CHK */
4377						sctp_total_flight_decrease(stcb, tp1);
4378					}
4379					tp1->whoTo->net_ack += tp1->send_size;
4380					if (tp1->snd_count < 2) {
4381						/*
						 * True non-retransmitted
4383						 * chunk
4384						 */
4385						tp1->whoTo->net_ack2 +=
4386						    tp1->send_size;
4387
4388						/* update RTO too? */
4389						if (tp1->do_rtt) {
4390							tp1->whoTo->RTO =
4391							/*
4392							 * sa_ignore
4393							 * NO_NULL_CHK
4394							 */
4395							    sctp_calculate_rto(stcb,
4396							    asoc, tp1->whoTo,
4397							    &tp1->sent_rcv_time,
4398							    sctp_align_safe_nocopy);
4399							tp1->do_rtt = 0;
4400						}
4401					}
4402					/*
4403					 * CMT: CUCv2 algorithm. From the
4404					 * cumack'd TSNs, for each TSN being
4405					 * acked for the first time, set the
4406					 * following variables for the
4407					 * corresp destination.
4408					 * new_pseudo_cumack will trigger a
4409					 * cwnd update.
4410					 * find_(rtx_)pseudo_cumack will
4411					 * trigger search for the next
4412					 * expected (rtx-)pseudo-cumack.
4413					 */
4414					tp1->whoTo->new_pseudo_cumack = 1;
4415					tp1->whoTo->find_pseudo_cumack = 1;
4416					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4417
4418					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4419						/* sa_ignore NO_NULL_CHK */
4420						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4421					}
4422				}
4423				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4424					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4425				}
4426				if (tp1->rec.data.chunk_was_revoked) {
4427					/* deflate the cwnd */
4428					tp1->whoTo->cwnd -= tp1->book_size;
4429					tp1->rec.data.chunk_was_revoked = 0;
4430				}
4431				tp1->sent = SCTP_DATAGRAM_ACKED;
4432				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4433				if (tp1->data) {
4434					/* sa_ignore NO_NULL_CHK */
4435					sctp_free_bufspace(stcb, asoc, tp1, 1);
4436					sctp_m_freem(tp1->data);
4437				}
4438				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4439					sctp_log_sack(asoc->last_acked_seq,
4440					    cumack,
4441					    tp1->rec.data.TSN_seq,
4442					    0,
4443					    0,
4444					    SCTP_LOG_FREE_SENT);
4445				}
4446				tp1->data = NULL;
4447				asoc->sent_queue_cnt--;
4448				sctp_free_a_chunk(stcb, tp1);
4449				tp1 = tp2;
4450			} else {
4451				break;
4452			}
4453		}
4454
4455	}
4456	/* sa_ignore NO_NULL_CHK */
4457	if (stcb->sctp_socket) {
4458#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4459		struct socket *so;
4460
4461#endif
4462
4463		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4464		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4465			/* sa_ignore NO_NULL_CHK */
4466			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4467		}
4468#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4469		so = SCTP_INP_SO(stcb->sctp_ep);
4470		atomic_add_int(&stcb->asoc.refcnt, 1);
4471		SCTP_TCB_UNLOCK(stcb);
4472		SCTP_SOCKET_LOCK(so, 1);
4473		SCTP_TCB_LOCK(stcb);
4474		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4475		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4476			/* assoc was freed while we were unlocked */
4477			SCTP_SOCKET_UNLOCK(so, 1);
4478			return;
4479		}
4480#endif
4481		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4482#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4483		SCTP_SOCKET_UNLOCK(so, 1);
4484#endif
4485	} else {
4486		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4487			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4488		}
4489	}
4490
4491	/* JRS - Use the congestion control given in the CC module */
4492	if (asoc->last_acked_seq != cumack)
4493		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4494
4495	asoc->last_acked_seq = cumack;
4496
4497	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4498		/* nothing left in-flight */
4499		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4500			net->flight_size = 0;
4501			net->partial_bytes_acked = 0;
4502		}
4503		asoc->total_flight = 0;
4504		asoc->total_flight_count = 0;
4505	}
4506	/* ECN Nonce updates */
4507	if (asoc->ecn_nonce_allowed) {
4508		if (asoc->nonce_sum_check) {
4509			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4510				if (asoc->nonce_wait_for_ecne == 0) {
4511					struct sctp_tmit_chunk *lchk;
4512
4513					lchk = TAILQ_FIRST(&asoc->send_queue);
4514					asoc->nonce_wait_for_ecne = 1;
4515					if (lchk) {
4516						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4517					} else {
4518						asoc->nonce_wait_tsn = asoc->sending_seq;
4519					}
4520				} else {
4521					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4522					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4523						/*
4524						 * Misbehaving peer. We need
4525						 * to react to this guy
4526						 */
4527						asoc->ecn_allowed = 0;
4528						asoc->ecn_nonce_allowed = 0;
4529					}
4530				}
4531			}
4532		} else {
4533			/* See if Resynchronization Possible */
4534			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4535				asoc->nonce_sum_check = 1;
4536				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments
				 * gap-acked in the SACK (none), and we
				 * also know the SACK's nonce sum, which is
				 * in nonce_sum_flag. So we can build a
				 * truth table to back-calculate the new
				 * value of asoc->nonce_sum_expect_base
				 * (Base = SACK-flag XOR Seg-Sums):
				 *
				 * SACK-flag-Value   Seg-Sums   Base
				 *        0             0        0
				 *        1             0        1
				 *        0             1        1
				 *        1             1        0
4549				 */
4550				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4551			}
4552		}
4553	}
4554	/* RWND update */
4555	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4556	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4557	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4558		/* SWS sender side engages */
4559		asoc->peers_rwnd = 0;
4560	}
4561	if (asoc->peers_rwnd > old_rwnd) {
4562		win_probe_recovery = 1;
4563	}
	/* Now assure a timer is running wherever data is still in flight */
4565again:
4566	j = 0;
4567	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4568		int to_ticks;
4569
4570		if (win_probe_recovery && (net->window_probe)) {
4571			win_probe_recovered = 1;
4572			/*
			 * Find the first chunk that was used with the
			 * window probe and move it back for retransmission.
4575			 */
4576			/* sa_ignore FREED_MEMORY */
4577			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4578				if (tp1->window_probe) {
4579					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4580					break;
4581				}
4582			}
4583		}
4584		if (net->RTO == 0) {
4585			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4586		} else {
4587			to_ticks = MSEC_TO_TICKS(net->RTO);
4588		}
4589		if (net->flight_size) {
4590			j++;
4591			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4592			    sctp_timeout_handler, &net->rxt_timer);
4593			if (net->window_probe) {
4594				net->window_probe = 0;
4595			}
4596		} else {
4597			if (net->window_probe) {
4598				/*
4599				 * In window probes we must assure a timer
4600				 * is still running there
4601				 */
4602				net->window_probe = 0;
4603				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4604					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4605					    sctp_timeout_handler, &net->rxt_timer);
4606				}
4607			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4608				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4609				    stcb, net,
4610				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4611			}
4612			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4613				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4614					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4615					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4616					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4617				}
4618			}
4619		}
4620	}
4621	if ((j == 0) &&
4622	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4623	    (asoc->sent_queue_retran_cnt == 0) &&
4624	    (win_probe_recovered == 0) &&
4625	    (done_once == 0)) {
4626		/*
4627		 * huh, this should not happen unless all packets are
4628		 * PR-SCTP and marked to skip of course.
4629		 */
4630		if (sctp_fs_audit(asoc)) {
4631			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4632				if (net->flight_size) {
4633					net->flight_size = 0;
4634				}
4635			}
4636			asoc->total_flight = 0;
4637			asoc->total_flight_count = 0;
4638			asoc->sent_queue_retran_cnt = 0;
4639			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4640				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4641					sctp_flight_size_increase(tp1);
4642					sctp_total_flight_increase(stcb, tp1);
4643				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4644					asoc->sent_queue_retran_cnt++;
4645				}
4646			}
4647		}
4648		done_once = 1;
4649		goto again;
4650	}
4651	/**********************************/
4652	/* Now what about shutdown issues */
4653	/**********************************/
4654	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4655		/* nothing left on sendqueue.. consider done */
4656		/* clean up */
4657		if ((asoc->stream_queue_cnt == 1) &&
4658		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4659		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4660		    (asoc->locked_on_sending)
4661		    ) {
4662			struct sctp_stream_queue_pending *sp;
4663
4664			/*
4665			 * I may be in a state where we got all across.. but
4666			 * cannot write more due to a shutdown... we abort
4667			 * since the user did not indicate EOR in this case.
4668			 * The sp will be cleaned during free of the asoc.
4669			 */
4670			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4671			    sctp_streamhead);
4672			if ((sp) && (sp->length == 0)) {
4673				/* Let cleanup code purge it */
4674				if (sp->msg_is_complete) {
4675					asoc->stream_queue_cnt--;
4676				} else {
4677					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4678					asoc->locked_on_sending = NULL;
4679					asoc->stream_queue_cnt--;
4680				}
4681			}
4682		}
4683		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4684		    (asoc->stream_queue_cnt == 0)) {
4685			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4686				/* Need to abort here */
4687				struct mbuf *oper;
4688
4689		abort_out_now:
4690				*abort_now = 1;
4691				/* XXX */
4692				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4693				    0, M_DONTWAIT, 1, MT_DATA);
4694				if (oper) {
4695					struct sctp_paramhdr *ph;
4696					uint32_t *ippp;
4697
4698					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4699					    sizeof(uint32_t);
4700					ph = mtod(oper, struct sctp_paramhdr *);
4701					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4702					ph->param_length = htons(SCTP_BUF_LEN(oper));
4703					ippp = (uint32_t *) (ph + 1);
4704					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4705				}
4706				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4707				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4708			} else {
4709				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4710				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4711					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4712				}
4713				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4714				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4715				sctp_stop_timers_for_shutdown(stcb);
4716				sctp_send_shutdown(stcb,
4717				    stcb->asoc.primary_destination);
4718				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4719				    stcb->sctp_ep, stcb, asoc->primary_destination);
4720				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4721				    stcb->sctp_ep, stcb, asoc->primary_destination);
4722			}
4723		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4724		    (asoc->stream_queue_cnt == 0)) {
4725			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4726				goto abort_out_now;
4727			}
4728			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4729			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4730			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4731			sctp_send_shutdown_ack(stcb,
4732			    stcb->asoc.primary_destination);
4733
4734			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4735			    stcb->sctp_ep, stcb, asoc->primary_destination);
4736		}
4737	}
4738	/*********************************************/
4739	/* Here we perform PR-SCTP procedures        */
4740	/* (section 4.2)                             */
4741	/*********************************************/
4742	/* C1. update advancedPeerAckPoint */
4743	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4744		asoc->advanced_peer_ack_point = cumack;
4745	}
4746	/* PR-Sctp issues need to be addressed too */
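	/*
	 * If abandoned chunks let the advanced peer-ack point move past
	 * the cumulative ack, the peer must be told with a FORWARD-TSN
	 * chunk; one is sent only when the point actually advanced, and
	 * it is re-sent if it appears to have been lost.
	 */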
4747	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4748		struct sctp_tmit_chunk *lchk;
4749		uint32_t old_adv_peer_ack_point;
4750
4751		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4752		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4753		/* C3. See if we need to send a Fwd-TSN */
4754		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4755		    MAX_TSN)) {
4756			/*
4757			 * ISSUE with ECN, see FWD-TSN processing for notes
4758			 * on issues that will occur when the ECN NONCE
4759			 * stuff is put into SCTP for cross checking.
4760			 */
4761			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4762			    MAX_TSN)) {
4763				send_forward_tsn(stcb, asoc);
4764				/*
4765				 * ECN Nonce: Disable Nonce Sum check when
4766				 * FWD TSN is sent and store resync tsn
4767				 */
4768				asoc->nonce_sum_check = 0;
4769				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4770			} else if (lchk) {
4771				/* try to FR fwd-tsn's that get lost too */
4772				lchk->rec.data.fwd_tsn_cnt++;
4773				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4774					send_forward_tsn(stcb, asoc);
4775					lchk->rec.data.fwd_tsn_cnt = 0;
4776				}
4777			}
4778		}
4779		if (lchk) {
4780			/* Assure a timer is up */
4781			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4782			    stcb->sctp_ep, stcb, lchk->whoTo);
4783		}
4784	}
4785	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4786		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4787		    rwnd,
4788		    stcb->asoc.peers_rwnd,
4789		    stcb->asoc.total_flight,
4790		    stcb->asoc.total_output_queue_size);
4791	}
4792}
4793
4794void
4795sctp_handle_sack(struct mbuf *m, int offset,
4796    struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4797    struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4798{
4799	struct sctp_association *asoc;
4800	struct sctp_sack *sack;
4801	struct sctp_tmit_chunk *tp1, *tp2;
4802	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4803	         this_sack_lowest_newack;
4804	uint32_t sav_cum_ack;
4805	uint16_t num_seg, num_dup;
4806	uint16_t wake_him = 0;
4807	unsigned int sack_length;
4808	uint32_t send_s = 0;
4809	long j;
4810	int accum_moved = 0;
4811	int will_exit_fast_recovery = 0;
4812	uint32_t a_rwnd, old_rwnd;
4813	int win_probe_recovery = 0;
4814	int win_probe_recovered = 0;
4815	struct sctp_nets *net = NULL;
4816	int nonce_sum_flag, ecn_seg_sums = 0;
4817	int done_once;
4818	uint8_t reneged_all = 0;
4819	uint8_t cmt_dac_flag;
4820
4821	/*
4822	 * we take any chance we can to service our queues since we cannot
4823	 * get awoken when the socket is read from :<
4824	 */
4825	/*
4826	 * Now perform the actual SACK handling: 1) Verify that it is not an
4827	 * old sack, if so discard. 2) If there is nothing left in the send
4828	 * queue (cum-ack is equal to last acked) then you have a duplicate
4829	 * too, update any rwnd change and verify no timers are running.
	 * then return. 3) Process any new consecutive data i.e. cum-ack
4831	 * moved process these first and note that it moved. 4) Process any
4832	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4833	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4834	 * sync up flightsizes and things, stop all timers and also check
4835	 * for shutdown_pending state. If so then go ahead and send off the
4836	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4837	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4838	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4839	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4840	 * if in shutdown_recv state.
4841	 */
4842	SCTP_TCB_LOCK_ASSERT(stcb);
4843	sack = &ch->sack;
4844	/* CMT DAC algo */
4845	this_sack_lowest_newack = 0;
4846	j = 0;
4847	sack_length = (unsigned int)sack_len;
4848	/* ECN Nonce */
4849	SCTP_STAT_INCR(sctps_slowpath_sack);
4850	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4851	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4852#ifdef SCTP_ASOCLOG_OF_TSNS
4853	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4854	stcb->asoc.cumack_log_at++;
4855	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4856		stcb->asoc.cumack_log_at = 0;
4857	}
4858#endif
4859	num_seg = ntohs(sack->num_gap_ack_blks);
4860	a_rwnd = rwnd;
4861
4862	/* CMT DAC algo */
4863	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4864	num_dup = ntohs(sack->num_dup_tsns);
4865
4866	old_rwnd = stcb->asoc.peers_rwnd;
4867	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4868		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4869		    stcb->asoc.overall_error_count,
4870		    0,
4871		    SCTP_FROM_SCTP_INDATA,
4872		    __LINE__);
4873	}
4874	stcb->asoc.overall_error_count = 0;
4875	asoc = &stcb->asoc;
4876	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4877		sctp_log_sack(asoc->last_acked_seq,
4878		    cum_ack,
4879		    0,
4880		    num_seg,
4881		    num_dup,
4882		    SCTP_LOG_NEW_SACK);
4883	}
4884	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4885		int off_to_dup, iii;
4886		uint32_t *dupdata, dblock;
4887
4888		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
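		/*
		 * On the wire the duplicate-TSN list follows directly
		 * after the gap-ack blocks, hence the offset of the
		 * fixed SACK chunk plus num_seg blocks.
		 */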
4889		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4890			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4891			    sizeof(uint32_t), (uint8_t *) & dblock);
4892			off_to_dup += sizeof(uint32_t);
4893			if (dupdata) {
4894				for (iii = 0; iii < num_dup; iii++) {
4895					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4896					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4897					    sizeof(uint32_t), (uint8_t *) & dblock);
4898					if (dupdata == NULL)
4899						break;
4900					off_to_dup += sizeof(uint32_t);
4901				}
4902			}
4903		} else {
4904			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4905			    off_to_dup, num_dup, sack_length, num_seg);
4906		}
4907	}
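	/*
	 * For reference, the SACK wire layout the offset math above assumes
	 * (field names per sctp_header.h):
	 *
	 *	struct sctp_sack_chunk              chunk header, cum_tsn_ack,
	 *	                                    a_rwnd, num_gap_ack_blks,
	 *	                                    num_dup_tsns
	 *	struct sctp_gap_ack_block[num_seg]  16-bit start/end offsets
	 *	uint32_t dup_tsn[num_dup]           duplicate TSN reports
	 *
	 * so the first duplicate TSN sits at sizeof(struct sctp_sack_chunk) +
	 * num_seg * sizeof(struct sctp_gap_ack_block), which is exactly how
	 * off_to_dup is formed above.
	 */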
4908	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4909		/* reality check */
4910		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4911			tp1 = TAILQ_LAST(&asoc->sent_queue,
4912			    sctpchunk_listhead);
4913			send_s = tp1->rec.data.TSN_seq + 1;
4914		} else {
4915			send_s = asoc->sending_seq;
4916		}
4917		if (cum_ack == send_s ||
4918		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4919#ifndef INVARIANTS
4920			struct mbuf *oper;
4921
4922#endif
4923#ifdef INVARIANTS
4924	hopeless_peer:
4925			panic("Impossible sack 1");
4926#else
4927
4928
4929			/*
4930			 * no way, we have not even sent this TSN out yet.
4931			 * Peer is hopelessly messed up with us.
4932			 */
4933	hopeless_peer:
4934			*abort_now = 1;
4935			/* XXX */
4936			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4937			    0, M_DONTWAIT, 1, MT_DATA);
4938			if (oper) {
4939				struct sctp_paramhdr *ph;
4940				uint32_t *ippp;
4941
4942				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4943				    sizeof(uint32_t);
4944				ph = mtod(oper, struct sctp_paramhdr *);
4945				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4946				ph->param_length = htons(SCTP_BUF_LEN(oper));
4947				ippp = (uint32_t *) (ph + 1);
4948				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4949			}
4950			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4951			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4952			return;
4953#endif
4954		}
4955	}
4956	/**********************/
4957	/* 1) check the range */
4958	/**********************/
4959	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4960		/* acking something behind */
4961		return;
4962	}
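	/*
	 * All TSN ordering here uses serial-number arithmetic so that
	 * 32-bit wraparound is handled. For illustration only, a test in
	 * the spirit of compare_with_wrap() (the real helper lives
	 * elsewhere in the stack) can be written as:
	 *
	 *	tsn_newer(a, b) := (a != b) && ((uint32_t)(a - b) < (1U << 31))
	 *
	 * With such a test, a SACK whose cum-ack is older than
	 * asoc->last_acked_seq is caught and dropped here even when the
	 * TSN space has wrapped.
	 */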
4963	sav_cum_ack = asoc->last_acked_seq;
4964
4965	/* update the Rwnd of the peer */
4966	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4967	    TAILQ_EMPTY(&asoc->send_queue) &&
4968	    (asoc->stream_queue_cnt == 0)
4969	    ) {
4970		/* nothing left on send/sent and strmq */
4971		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4972			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4973			    asoc->peers_rwnd, 0, 0, a_rwnd);
4974		}
4975		asoc->peers_rwnd = a_rwnd;
4976		if (asoc->sent_queue_retran_cnt) {
4977			asoc->sent_queue_retran_cnt = 0;
4978		}
4979		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4980			/* SWS sender side engages */
4981			asoc->peers_rwnd = 0;
4982		}
4983		/* stop any timers */
4984		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4985			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4986			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4987			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4988				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4989					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4990					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4991					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4992				}
4993			}
4994			net->partial_bytes_acked = 0;
4995			net->flight_size = 0;
4996		}
4997		asoc->total_flight = 0;
4998		asoc->total_flight_count = 0;
4999		return;
5000	}
5001	/*
5002	 * We init net_ack and net_ack2 to 0. These are used to track two
5003	 * things: the total byte count acked is tracked in net_ack, and
5004	 * net_ack2 tracks the total bytes acked that are unambiguous and
5005	 * were never retransmitted. We track these on a per destination
5006	 * address basis.
5007	 */
5008	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5009		net->prev_cwnd = net->cwnd;
5010		net->net_ack = 0;
5011		net->net_ack2 = 0;
5012
5013		/*
5014		 * CMT: Reset CUC and Fast recovery algo variables before
5015		 * SACK processing
5016		 */
5017		net->new_pseudo_cumack = 0;
5018		net->will_exit_fast_recovery = 0;
5019	}
5020	/* process the new consecutive TSN first */
5021	tp1 = TAILQ_FIRST(&asoc->sent_queue);
5022	while (tp1) {
5023		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
5024		    MAX_TSN) ||
5025		    last_tsn == tp1->rec.data.TSN_seq) {
5026			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
5027				/*
5028				 * ECN Nonce: Add the nonce to the sender's
5029				 * nonce sum
5030				 */
5031				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
5032				accum_moved = 1;
5033				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
5034					/*
5035					 * If it is less than ACKED, it is
5036					 * now no-longer in flight. Higher
5037					 * values may occur during marking
5038					 */
5039					if ((tp1->whoTo->dest_state &
5040					    SCTP_ADDR_UNCONFIRMED) &&
5041					    (tp1->snd_count < 2)) {
5042						/*
5043						 * If there was no retran,
5044						 * the address is
5045						 * un-confirmed, we sent
5046						 * there, and it is now
5047						 * sacked: it's confirmed,
5048						 * so mark it.
5049						 */
5050						tp1->whoTo->dest_state &=
5051						    ~SCTP_ADDR_UNCONFIRMED;
5052					}
5053					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5054						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5055							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
5056							    tp1->whoTo->flight_size,
5057							    tp1->book_size,
5058							    (uintptr_t) tp1->whoTo,
5059							    tp1->rec.data.TSN_seq);
5060						}
5061						sctp_flight_size_decrease(tp1);
5062						sctp_total_flight_decrease(stcb, tp1);
5063					}
5064					tp1->whoTo->net_ack += tp1->send_size;
5065
5066					/* CMT SFR and DAC algos */
5067					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5068					tp1->whoTo->saw_newack = 1;
5069
5070					if (tp1->snd_count < 2) {
5071						/*
5072						 * True non-retransmitted
5073						 * chunk
5074						 */
5075						tp1->whoTo->net_ack2 +=
5076						    tp1->send_size;
5077
5078						/* update RTO too? */
5079						if (tp1->do_rtt) {
5080							tp1->whoTo->RTO =
5081							    sctp_calculate_rto(stcb,
5082							    asoc, tp1->whoTo,
5083							    &tp1->sent_rcv_time,
5084							    sctp_align_safe_nocopy);
5085							tp1->do_rtt = 0;
5086						}
5087					}
5088					/*
5089					 * CMT: CUCv2 algorithm. From the
5090					 * cumack'd TSNs, for each TSN being
5091					 * acked for the first time, set the
5092					 * following variables for the
5093					 * corresp destination.
5094					 * new_pseudo_cumack will trigger a
5095					 * cwnd update.
5096					 * find_(rtx_)pseudo_cumack will
5097					 * trigger search for the next
5098					 * expected (rtx-)pseudo-cumack.
5099					 */
5100					tp1->whoTo->new_pseudo_cumack = 1;
5101					tp1->whoTo->find_pseudo_cumack = 1;
5102					tp1->whoTo->find_rtx_pseudo_cumack = 1;
5103
5104
5105					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5106						sctp_log_sack(asoc->last_acked_seq,
5107						    cum_ack,
5108						    tp1->rec.data.TSN_seq,
5109						    0,
5110						    0,
5111						    SCTP_LOG_TSN_ACKED);
5112					}
5113					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5114						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5115					}
5116				}
5117				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5118					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5119#ifdef SCTP_AUDITING_ENABLED
5120					sctp_audit_log(0xB3,
5121					    (asoc->sent_queue_retran_cnt & 0x000000ff));
5122#endif
5123				}
5124				if (tp1->rec.data.chunk_was_revoked) {
5125					/* deflate the cwnd */
5126					tp1->whoTo->cwnd -= tp1->book_size;
5127					tp1->rec.data.chunk_was_revoked = 0;
5128				}
5129				tp1->sent = SCTP_DATAGRAM_ACKED;
5130			}
5131		} else {
5132			break;
5133		}
5134		tp1 = TAILQ_NEXT(tp1, sctp_next);
5135	}
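	/*
	 * The sent queue is kept in ascending TSN order, which is why the
	 * walk above may stop at the first TSN beyond the new cum-ack.
	 */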
5136	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5137	/* always set this up to cum-ack */
5138	asoc->this_sack_highest_gap = last_tsn;
5139
5140	/* Move offset up to point to gaps/dups */
5141	offset += sizeof(struct sctp_sack_chunk);
5142	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5143
5144		/* skip corrupt segments */
5145		goto skip_segments;
5146	}
5147	if (num_seg > 0) {
5148
5149		/*
5150		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5151		 * to be greater than the cumack. Also reset saw_newack to 0
5152		 * for all dests.
5153		 */
5154		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5155			net->saw_newack = 0;
5156			net->this_sack_highest_newack = last_tsn;
5157		}
5158
5159		/*
5160		 * thisSackHighestGap will increase while handling NEW
5161		 * segments this_sack_highest_newack will increase while
5162		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
5163		 * used for CMT DAC algo. saw_newack will also change.
5164		 */
5165		sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5166		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5167		    num_seg, &ecn_seg_sums);
5168
5169		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5170			/*
5171			 * validate the biggest_tsn_acked in the gap acks if
5172			 * strict adherence is wanted.
5173			 */
5174			if ((biggest_tsn_acked == send_s) ||
5175			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5176				/*
5177				 * peer is either confused or we are under
5178				 * attack. We must abort.
5179				 */
5180				goto hopeless_peer;
5181			}
5182		}
5183	}
5184skip_segments:
5185	/*******************************************/
5186	/* cancel ALL T3-send timer if accum moved */
5187	/*******************************************/
5188	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5189		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5190			if (net->new_pseudo_cumack)
5191				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5192				    stcb, net,
5193				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5194
5195		}
5196	} else {
5197		if (accum_moved) {
5198			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5199				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5200				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5201			}
5202		}
5203	}
5204	/********************************************/
5205	/* drop the acked chunks from the sendqueue */
5206	/********************************************/
5207	asoc->last_acked_seq = cum_ack;
5208
5209	tp1 = TAILQ_FIRST(&asoc->sent_queue);
5210	if (tp1 == NULL)
5211		goto done_with_it;
5212	do {
5213		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5214		    MAX_TSN)) {
5215			break;
5216		}
5217		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5218			/* no more sent on list */
5219			printf("Warning, tp1->sent == %d and it's now acked?\n",
5220			    tp1->sent);
5221		}
5222		tp2 = TAILQ_NEXT(tp1, sctp_next);
5223		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5224		if (tp1->pr_sctp_on) {
5225			if (asoc->pr_sctp_cnt != 0)
5226				asoc->pr_sctp_cnt--;
5227		}
5228		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5229		    (asoc->total_flight > 0)) {
5230#ifdef INVARIANTS
5231			panic("Warning flight size is positive and should be 0");
5232#else
5233			SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
5234			    asoc->total_flight);
5235#endif
5236			asoc->total_flight = 0;
5237		}
5238		if (tp1->data) {
5239			/* sa_ignore NO_NULL_CHK */
5240			sctp_free_bufspace(stcb, asoc, tp1, 1);
5241			sctp_m_freem(tp1->data);
5242			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5243				asoc->sent_queue_cnt_removeable--;
5244			}
5245		}
5246		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5247			sctp_log_sack(asoc->last_acked_seq,
5248			    cum_ack,
5249			    tp1->rec.data.TSN_seq,
5250			    0,
5251			    0,
5252			    SCTP_LOG_FREE_SENT);
5253		}
5254		tp1->data = NULL;
5255		asoc->sent_queue_cnt--;
5256		sctp_free_a_chunk(stcb, tp1);
5257		wake_him++;
5258		tp1 = tp2;
5259	} while (tp1 != NULL);
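	/*
	 * Note the iterator pattern above: tp2 is latched before
	 * TAILQ_REMOVE() so the walk survives unlinking tp1 - the
	 * hand-rolled equivalent of TAILQ_FOREACH_SAFE.
	 */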
5260
5261done_with_it:
5262	/* sa_ignore NO_NULL_CHK */
5263	if ((wake_him) && (stcb->sctp_socket)) {
5264#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5265		struct socket *so;
5266
5267#endif
5268		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5269		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5270			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5271		}
5272#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5273		so = SCTP_INP_SO(stcb->sctp_ep);
5274		atomic_add_int(&stcb->asoc.refcnt, 1);
5275		SCTP_TCB_UNLOCK(stcb);
5276		SCTP_SOCKET_LOCK(so, 1);
5277		SCTP_TCB_LOCK(stcb);
5278		atomic_subtract_int(&stcb->asoc.refcnt, 1);
5279		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5280			/* assoc was freed while we were unlocked */
5281			SCTP_SOCKET_UNLOCK(so, 1);
5282			return;
5283		}
5284#endif
5285		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5286#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5287		SCTP_SOCKET_UNLOCK(so, 1);
5288#endif
5289	} else {
5290		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5291			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5292		}
5293	}
5294
5295	if (asoc->fast_retran_loss_recovery && accum_moved) {
5296		if (compare_with_wrap(asoc->last_acked_seq,
5297		    asoc->fast_recovery_tsn, MAX_TSN) ||
5298		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5299			/* Setup so we will exit RFC2582 fast recovery */
5300			will_exit_fast_recovery = 1;
5301		}
5302	}
5303	/*
5304	 * Check for revoked fragments:
5305	 *
5306	 * If the previous sack had no frags, we cannot have any revoked.
5307	 * If it had frags and we now have frags (num_seg > 0), call
5308	 * sctp_check_for_revoked() to tell if the peer revoked some of
5309	 * them. Otherwise the peer revoked all ACKED fragments: we had
5310	 * some before and now we have none.
5311	 */
5312
5313	if (num_seg)
5314		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5315	else if (asoc->saw_sack_with_frags) {
5316		int cnt_revoked = 0;
5317
5318		tp1 = TAILQ_FIRST(&asoc->sent_queue);
5319		if (tp1 != NULL) {
5320			/* Peer revoked all dg's marked or acked */
5321			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5322				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5323				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5324					tp1->sent = SCTP_DATAGRAM_SENT;
5325					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5326						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5327						    tp1->whoTo->flight_size,
5328						    tp1->book_size,
5329						    (uintptr_t) tp1->whoTo,
5330						    tp1->rec.data.TSN_seq);
5331					}
5332					sctp_flight_size_increase(tp1);
5333					sctp_total_flight_increase(stcb, tp1);
5334					tp1->rec.data.chunk_was_revoked = 1;
5335					/*
5336					 * To ensure that this increase in
5337					 * flightsize, which is artificial,
5338					 * does not throttle the sender, we
5339					 * also increase the cwnd
5340					 * artificially.
5341					 */
5342					tp1->whoTo->cwnd += tp1->book_size;
5343					cnt_revoked++;
5344				}
5345			}
5346			if (cnt_revoked) {
5347				reneged_all = 1;
5348			}
5349		}
5350		asoc->saw_sack_with_frags = 0;
5351	}
5352	if (num_seg)
5353		asoc->saw_sack_with_frags = 1;
5354	else
5355		asoc->saw_sack_with_frags = 0;
5356
5357	/* JRS - Use the congestion control given in the CC module */
5358	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5359
5360	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5361		/* nothing left in-flight */
5362		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5363			/* stop all timers */
5364			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5365				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5366					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5367					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5368					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5369				}
5370			}
5371			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5372			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5373			net->flight_size = 0;
5374			net->partial_bytes_acked = 0;
5375		}
5376		asoc->total_flight = 0;
5377		asoc->total_flight_count = 0;
5378	}
5379	/**********************************/
5380	/* Now what about shutdown issues */
5381	/**********************************/
5382	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5383		/* nothing left on sendqueue.. consider done */
5384		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5385			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5386			    asoc->peers_rwnd, 0, 0, a_rwnd);
5387		}
5388		asoc->peers_rwnd = a_rwnd;
5389		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5390			/* SWS sender side engages */
5391			asoc->peers_rwnd = 0;
5392		}
5393		/* clean up */
5394		if ((asoc->stream_queue_cnt == 1) &&
5395		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5396		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5397		    (asoc->locked_on_sending)
5398		    ) {
5399			struct sctp_stream_queue_pending *sp;
5400
5401			/*
5402			 * I may be in a state where we got it all across but
5403			 * cannot write more due to a shutdown... we abort
5404			 * since the user did not indicate EOR in this case.
5405			 */
5406			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5407			    sctp_streamhead);
5408			if ((sp) && (sp->length == 0)) {
5409				asoc->locked_on_sending = NULL;
5410				if (sp->msg_is_complete) {
5411					asoc->stream_queue_cnt--;
5412				} else {
5413					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5414					asoc->stream_queue_cnt--;
5415				}
5416			}
5417		}
5418		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5419		    (asoc->stream_queue_cnt == 0)) {
5420			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5421				/* Need to abort here */
5422				struct mbuf *oper;
5423
5424		abort_out_now:
5425				*abort_now = 1;
5426				/* XXX */
5427				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5428				    0, M_DONTWAIT, 1, MT_DATA);
5429				if (oper) {
5430					struct sctp_paramhdr *ph;
5431					uint32_t *ippp;
5432
5433					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5434					    sizeof(uint32_t);
5435					ph = mtod(oper, struct sctp_paramhdr *);
5436					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5437					ph->param_length = htons(SCTP_BUF_LEN(oper));
5438					ippp = (uint32_t *) (ph + 1);
5439					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5440				}
5441				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5442				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5443				return;
5444			} else {
5445				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5446				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5447					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5448				}
5449				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5450				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5451				sctp_stop_timers_for_shutdown(stcb);
5452				sctp_send_shutdown(stcb,
5453				    stcb->asoc.primary_destination);
5454				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5455				    stcb->sctp_ep, stcb, asoc->primary_destination);
5456				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5457				    stcb->sctp_ep, stcb, asoc->primary_destination);
5458			}
5459			return;
5460		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5461		    (asoc->stream_queue_cnt == 0)) {
5462			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5463				goto abort_out_now;
5464			}
5465			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5466			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5467			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5468			sctp_send_shutdown_ack(stcb,
5469			    stcb->asoc.primary_destination);
5470
5471			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5472			    stcb->sctp_ep, stcb, asoc->primary_destination);
5473			return;
5474		}
5475	}
5476	/*
5477	 * Now here we are going to recycle net_ack for a different use...
5478	 * HEADS UP.
5479	 */
5480	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5481		net->net_ack = 0;
5482	}
5483
5484	/*
5485	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5486	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5487	 * automatically ensure that.
5488	 */
5489	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5490		this_sack_lowest_newack = cum_ack;
5491	}
5492	if (num_seg > 0) {
5493		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5494		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5495	}
5496	/* JRS - Use the congestion control given in the CC module */
5497	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5498
5499	/******************************************************************
5500	 *  Here we do the ECN Nonce checking.
5501	 *  We basically check to see if the nonce sum flag was incorrect
5502	 *  or if resynchronization needs to be done. Also, if we catch a
5503	 *  misbehaving receiver we give it the kick.
5504	 ******************************************************************/
5505
5506	if (asoc->ecn_nonce_allowed) {
5507		if (asoc->nonce_sum_check) {
5508			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5509				if (asoc->nonce_wait_for_ecne == 0) {
5510					struct sctp_tmit_chunk *lchk;
5511
5512					lchk = TAILQ_FIRST(&asoc->send_queue);
5513					asoc->nonce_wait_for_ecne = 1;
5514					if (lchk) {
5515						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5516					} else {
5517						asoc->nonce_wait_tsn = asoc->sending_seq;
5518					}
5519				} else {
5520					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5521					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5522						/*
5523						 * Misbehaving peer. We need
5524						 * to react to this guy
5525						 */
5526						asoc->ecn_allowed = 0;
5527						asoc->ecn_nonce_allowed = 0;
5528					}
5529				}
5530			}
5531		} else {
5532			/* See if Resynchronization Possible */
5533			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5534				asoc->nonce_sum_check = 1;
5535				/*
5536				 * Now we must calculate what the base is. We
5537				 * do this based on two things: we know the
5538				 * totals for all the segments gap-acked in
5539				 * the SACK (stored in ecn_seg_sums) and the
5540				 * SACK's nonce sum (in nonce_sum_flag). So
5541				 * we can build a truth table to back-
5542				 * calculate asoc->nonce_sum_expect_base:
5543				 *
5544				 * SACK-flag  Seg-Sums  Base
5545				 *     0         0       0
5546				 *     1         0       1
5547				 *     0         1       1
5548				 *     1         1       0
5549				 */
5550				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5551			}
5552		}
5553	}
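	/*
	 * In short, the truth table above is XOR: for a well-behaved peer
	 * (base ^ seg_sums) == sack_flag must hold, hence the
	 * back-calculation base = (sack_flag ^ seg_sums) performed above.
	 */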
5554	/* Now are we exiting loss recovery ? */
5555	if (will_exit_fast_recovery) {
5556		/* Ok, we must exit fast recovery */
5557		asoc->fast_retran_loss_recovery = 0;
5558	}
5559	if ((asoc->sat_t3_loss_recovery) &&
5560	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5561	    MAX_TSN) ||
5562	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5563		/* end satellite t3 loss recovery */
5564		asoc->sat_t3_loss_recovery = 0;
5565	}
5566	/*
5567	 * CMT Fast recovery
5568	 */
5569	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5570		if (net->will_exit_fast_recovery) {
5571			/* Ok, we must exit fast recovery */
5572			net->fast_retran_loss_recovery = 0;
5573		}
5574	}
5575
5576	/* Adjust and set the new rwnd value */
5577	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5578		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5579		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5580	}
5581	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5582	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5583	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5584		/* SWS sender side engages */
5585		asoc->peers_rwnd = 0;
5586	}
5587	if (asoc->peers_rwnd > old_rwnd) {
5588		win_probe_recovery = 1;
5589	}
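	/*
	 * Worked example of the update above (numbers illustrative only):
	 * with a_rwnd = 64000, total_flight = 60000, sent_queue_cnt = 10
	 * and sctp_peer_chunk_oh = 256, the usable peer window becomes
	 * 64000 - (60000 + 10 * 256) = 1440 bytes; had the result fallen
	 * below sctp_sws_sender it would be clamped to 0 so that we do not
	 * dribble tiny packets at a nearly-full receiver.
	 */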
5590	/*
5591	 * Now we must setup so we have a timer up for anyone with
5592	 * outstanding data.
5593	 */
5594	done_once = 0;
5595again:
5596	j = 0;
5597	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5598		if (win_probe_recovery && (net->window_probe)) {
5599			win_probe_recovered = 1;
5600			/*-
5601			 * Find the first chunk that was used with the
5602			 * window probe and clear the event. Put it
5603			 * back into the send queue as if it had
5604			 * not been sent.
5605			 */
5606			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5607				if (tp1->window_probe) {
5608					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5609					break;
5610				}
5611			}
5612		}
5613		if (net->flight_size) {
5614			j++;
5615			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5616			    stcb->sctp_ep, stcb, net);
5619		} else {
5620			if (net->window_probe) {
5621				/*
5622				 * In window probes we must assure a timer
5623				 * is still running there
5624				 */
5625
5626				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5627					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5628					    stcb->sctp_ep, stcb, net);
5629
5630				}
5631			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5632				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5633				    stcb, net,
5634				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5635			}
5636			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5637				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5638					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5639					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5640					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5641				}
5642			}
5643		}
5644	}
5645	if ((j == 0) &&
5646	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5647	    (asoc->sent_queue_retran_cnt == 0) &&
5648	    (win_probe_recovered == 0) &&
5649	    (done_once == 0)) {
5650		/*
5651		 * huh, this should not happen unless all packets are
5652		 * PR-SCTP and marked to skip of course.
5653		 */
5654		if (sctp_fs_audit(asoc)) {
5655			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5656				net->flight_size = 0;
5657			}
5658			asoc->total_flight = 0;
5659			asoc->total_flight_count = 0;
5660			asoc->sent_queue_retran_cnt = 0;
5661			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5662				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5663					sctp_flight_size_increase(tp1);
5664					sctp_total_flight_increase(stcb, tp1);
5665				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5666					asoc->sent_queue_retran_cnt++;
5667				}
5668			}
5669		}
5670		done_once = 1;
5671		goto again;
5672	}
5673	/* Fix up the a-p-a-p for future PR-SCTP sends */
5674	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5675		asoc->advanced_peer_ack_point = cum_ack;
5676	}
5677	/* C2. try to further move advancedPeerAckPoint ahead */
5678	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5679		struct sctp_tmit_chunk *lchk;
5680		uint32_t old_adv_peer_ack_point;
5681
5682		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5683		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5684		/* C3. See if we need to send a Fwd-TSN */
5685		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5686		    MAX_TSN)) {
5687			/*
5688			 * ISSUE with ECN, see FWD-TSN processing for notes
5689			 * on issues that will occur when the ECN NONCE
5690			 * stuff is put into SCTP for cross checking.
5691			 */
5692			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5693				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5694				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5695				    old_adv_peer_ack_point);
5696			}
5697			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5698			    MAX_TSN)) {
5699				send_forward_tsn(stcb, asoc);
5700				/*
5701				 * ECN Nonce: Disable Nonce Sum check when
5702				 * FWD TSN is sent and store resync tsn
5703				 */
5704				asoc->nonce_sum_check = 0;
5705				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5706			} else if (lchk) {
5707				/* try to FR fwd-tsn's that get lost too */
5708				lchk->rec.data.fwd_tsn_cnt++;
5709				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5710					send_forward_tsn(stcb, asoc);
5711					lchk->rec.data.fwd_tsn_cnt = 0;
5712				}
5713			}
5714		}
5715		if (lchk) {
5716			/* Assure a timer is up */
5717			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5718			    stcb->sctp_ep, stcb, lchk->whoTo);
5719		}
5720	}
5721	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5722		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5723		    a_rwnd,
5724		    stcb->asoc.peers_rwnd,
5725		    stcb->asoc.total_flight,
5726		    stcb->asoc.total_output_queue_size);
5727	}
5728}
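/*
 * A minimal, self-contained sketch of the peers_rwnd arithmetic the
 * slow path ends with (illustrative only; sctp_sbspace_sub() is the
 * authoritative helper and sws_sender comes from the endpoint):
 */
#if 0
static uint32_t
peers_rwnd_sketch(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t sent_queue_cnt, uint32_t per_chunk_oh, uint32_t sws_sender)
{
	uint32_t charged, rwnd;

	charged = total_flight + (sent_queue_cnt * per_chunk_oh);
	rwnd = (a_rwnd > charged) ? (a_rwnd - charged) : 0;
	/* SWS avoidance: below the threshold, advertise nothing */
	if (rwnd < sws_sender)
		rwnd = 0;
	return (rwnd);
}
#endif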
5729
5730void
5731sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5732    struct sctp_nets *netp, int *abort_flag)
5733{
5734	/* Copy cum-ack */
5735	uint32_t cum_ack, a_rwnd;
5736
5737	cum_ack = ntohl(cp->cumulative_tsn_ack);
5738	/* Arrange so a_rwnd does NOT change */
5739	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5740
5741	/* Now call the express sack handling */
5742	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5743}
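/*
 * Note on the a_rwnd choice above: the express handler recomputes
 * peers_rwnd as roughly a_rwnd minus the bytes still in flight, so
 * seeding a_rwnd = peers_rwnd + total_flight leaves the advertised
 * window unchanged by a SHUTDOWN's implicit cum-ack. A sketch of the
 * cancellation (illustrative; the real path also charges per-chunk
 * overhead via sctp_peer_chunk_oh):
 */
#if 0
static uint32_t
shutdown_rwnd_sketch(uint32_t peers_rwnd, uint32_t total_flight)
{
	uint32_t a_rwnd = peers_rwnd + total_flight;

	/* mirrors sctp_sbspace_sub(a_rwnd, total_flight) */
	return ((a_rwnd > total_flight) ? (a_rwnd - total_flight) : 0);
}
#endif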
5744
5745static void
5746sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5747    struct sctp_stream_in *strmin)
5748{
5749	struct sctp_queued_to_read *ctl, *nctl;
5750	struct sctp_association *asoc;
5751	int tt;
5752
5753	/* EY - used to calculate nr_gap information */
5754	uint32_t nr_tsn, nr_gap;
5755
5756	asoc = &stcb->asoc;
5757	tt = strmin->last_sequence_delivered;
5758	/*
5759	 * First deliver anything prior to and including the stream
5760	 * sequence number that came in.
5761	 */
5762	ctl = TAILQ_FIRST(&strmin->inqueue);
5763	while (ctl) {
5764		nctl = TAILQ_NEXT(ctl, next);
5765		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5766		    (tt == ctl->sinfo_ssn)) {
5767			/* this is deliverable now */
5768			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5769			/* subtract pending on streams */
5770			asoc->size_on_all_streams -= ctl->length;
5771			sctp_ucount_decr(asoc->cnt_on_all_streams);
5772			/* deliver it to at least the delivery-q */
5773			if (stcb->sctp_socket) {
5774				/* EY need the tsn info for calculating nr */
5775				nr_tsn = ctl->sinfo_tsn;
5776				sctp_add_to_readq(stcb->sctp_ep, stcb,
5777				    ctl,
5778				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5779				/*
5780				 * EY - this is the chunk that should be
5781				 * tagged NR: calculate the gap and then
5782				 * mark this TSN (chk->rec.data.TSN_seq)
5783				 * as NR in the nr mapping array.
5784				 */
5785				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5786
5787					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5788						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5789					} else {
5790						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5791					}
5792					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5793					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5794						/*
5795						 * EY - these should never
5796						 * happen; explained before.
5797						 */
5798					} else {
5799						SCTP_TCB_LOCK_ASSERT(stcb);
5800						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5801						if (nr_tsn > asoc->highest_tsn_inside_nr_map)
5802							asoc->highest_tsn_inside_nr_map = nr_tsn;
5803					}
5804
					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
						/*
						 * Debug output disabled:
						 * "In sctp_kick_prsctp_reorder_queue(7):
						 * Something wrong, the TSN to be tagged
						 * as NR is not even in the mapping_array,
						 * or map and nr_map are inconsistent."
						 */
						/*
						 * EY - not 100% sure about the lock
						 * thing; don't think it is required:
						 * SCTP_TCB_LOCK_ASSERT(stcb);
						 */
						/*
						 * Debug output disabled: would dump
						 * mapping_array_size,
						 * nr_mapping_array_size,
						 * mapping_array_base_tsn,
						 * nr_mapping_array_base_tsn,
						 * highest_tsn_inside_map,
						 * highest_tsn_inside_nr_map, the TSN
						 * and nr_gap when calculating an
						 * nr_gap.
						 */
					}
5852				}
5853			}
5854		} else {
5855			/* no more delivery now. */
5856			break;
5857		}
5858		ctl = nctl;
5859	}
5860	/*
5861	 * now we must deliver things in the queue the normal way if any are
5862	 * now ready.
5863	 */
5864	tt = strmin->last_sequence_delivered + 1;
5865	ctl = TAILQ_FIRST(&strmin->inqueue);
5866	while (ctl) {
5867		nctl = TAILQ_NEXT(ctl, next);
5868		if (tt == ctl->sinfo_ssn) {
5869			/* this is deliverable now */
5870			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5871			/* subtract pending on streams */
5872			asoc->size_on_all_streams -= ctl->length;
5873			sctp_ucount_decr(asoc->cnt_on_all_streams);
5874			/* deliver it to at least the delivery-q */
5875			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5876			if (stcb->sctp_socket) {
5877				/* EY */
5878				nr_tsn = ctl->sinfo_tsn;
5879				sctp_add_to_readq(stcb->sctp_ep, stcb,
5880				    ctl,
5881				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5882				/*
5883				 * EY - this is the chunk that should be
5884				 * tagged NR: calculate the gap and then
5885				 * mark this TSN (chk->rec.data.TSN_seq)
5886				 * as NR in the nr mapping array.
5887				 */
5888				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5889
5890					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5891						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5892					} else {
5893						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5894					}
5895					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5896					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5897						/*
5898						 * EY - these should never
5899						 * happen; explained before.
5900						 */
5901					} else {
5902						SCTP_TCB_LOCK_ASSERT(stcb);
5903						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5904						if (nr_tsn > asoc->highest_tsn_inside_nr_map)
5905							asoc->highest_tsn_inside_nr_map = nr_tsn;
5906					}
5907
5908
					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
						/*
						 * Debug output disabled:
						 * "In sctp_kick_prsctp_reorder_queue(8):
						 * Something wrong, the TSN to be tagged
						 * as NR is not even in the mapping_array,
						 * or map and nr_map are inconsistent."
						 */
						/*
						 * EY - not 100% sure about the lock
						 * thing; don't think it is required:
						 * SCTP_TCB_LOCK_ASSERT(stcb);
						 */
						/*
						 * Debug output disabled: would dump
						 * mapping_array_size,
						 * nr_mapping_array_size,
						 * mapping_array_base_tsn,
						 * nr_mapping_array_base_tsn,
						 * highest_tsn_inside_map,
						 * highest_tsn_inside_nr_map, the TSN
						 * and nr_gap when calculating an
						 * nr_gap.
						 */
					}
5956				}
5957			}
5958			tt = strmin->last_sequence_delivered + 1;
5959		} else {
5960			break;
5961		}
5962		ctl = nctl;
5963	}
5964}
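/*
 * The nr_gap computation used above maps a TSN onto a bit index in the
 * (nr_)mapping array relative to its base TSN, wrapping through MAX_TSN
 * when necessary. A standalone sketch of that mapping (illustrative
 * only; the callers additionally bound-check the result against the
 * array size in bits):
 */
#if 0
static uint32_t
tsn_to_gap_sketch(uint32_t tsn, uint32_t base_tsn)
{
	if (tsn >= base_tsn)
		return (tsn - base_tsn);
	/* the TSN wrapped: count up through MAX_TSN and back around */
	return ((MAX_TSN - base_tsn) + tsn + 1);
}
#endif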
5965
5966void
5967sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5968    struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
5969{
5970	/*
5971	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5972	 * forward TSN, when the SACK comes back that acknowledges the
5973	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5974	 * get quite tricky since we may have sent more data intervening
5975	 * and must carefully account for what the SACK says on the nonce
5976	 * and any gaps that are reported. This work will NOT be done here,
5977	 * but I note it here since it is really related to PR-SCTP and
5978	 * FWD-TSN's
5979	 */
5980
5981	/* The pr-sctp fwd tsn */
5982	/*
5983	 * Here we will perform all the data receiver side steps for
5984	 * processing FwdTSN, as required by the pr-sctp draft.
5985	 * Assume we get FwdTSN(x):
5986	 *
5987	 * 1) update local cumTSN to x
5988	 * 2) try to further advance cumTSN to x + others we have
5989	 * 3) examine and update the re-ordering queue on pr-in-streams
5990	 * 4) clean up the re-assembly queue
5991	 * 5) send a sack to report where we are
5992	 */
5993	struct sctp_association *asoc;
5994	uint32_t new_cum_tsn, gap;
5995	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5996	struct sctp_stream_in *strm;
5997	struct sctp_tmit_chunk *chk, *at;
5998
5999	cumack_set_flag = 0;
6000	asoc = &stcb->asoc;
6001	cnt_gone = 0;
6002	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
6003		SCTPDBG(SCTP_DEBUG_INDATA1,
6004		    "Bad size too small/big fwd-tsn\n");
6005		return;
6006	}
6007	m_size = (stcb->asoc.mapping_array_size << 3);
6008	/*************************************************************/
6009	/* 1. Here we update local cumTSN and shift the bitmap array */
6010	/*************************************************************/
6011	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
6012
6013	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
6014	    asoc->cumulative_tsn == new_cum_tsn) {
6015		/* Already got there ... */
6016		return;
6017	}
6018	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
6019	    MAX_TSN)) {
6020		asoc->highest_tsn_inside_map = new_cum_tsn;
6021		/* EY nr_mapping_array version of the above */
6022		/*
6023		 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
6024		 * asoc->peer_supports_nr_sack)
6025		 */
6026		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6027		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6028			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6029		}
6030	}
6031	/*
6032	 * now we know the new TSN is more advanced, let's find the actual
6033	 * gap
6034	 */
6035	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
6036	    MAX_TSN)) ||
6037	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
6038		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
6039	} else {
6040		/* try to prevent underflow here */
6041		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
6042	}
6043
6044	if (gap >= m_size) {
6045		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6046			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6047		}
6048		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
6049			struct mbuf *oper;
6050
6051			/*
6052			 * out of range (of single byte chunks in the rwnd I
6053			 * give out). This must be an attacker.
6054			 */
6055			*abort_flag = 1;
6056			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
6057			    0, M_DONTWAIT, 1, MT_DATA);
6058			if (oper) {
6059				struct sctp_paramhdr *ph;
6060				uint32_t *ippp;
6061
6062				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6063				    (sizeof(uint32_t) * 3);
6064				ph = mtod(oper, struct sctp_paramhdr *);
6065				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6066				ph->param_length = htons(SCTP_BUF_LEN(oper));
6067				ippp = (uint32_t *) (ph + 1);
6068				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
6069				ippp++;
6070				*ippp = asoc->highest_tsn_inside_map;
6071				ippp++;
6072				*ippp = new_cum_tsn;
6073			}
6074			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6075			sctp_abort_an_association(stcb->sctp_ep, stcb,
6076			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6077			return;
6078		}
6079		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
6080		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6081		cumack_set_flag = 1;
6082		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6083		asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6084		/* EY - nr_sack: nr_mapping_array version of the above */
6085		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6086			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6087			asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6088			asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6089			if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
6090				/*
6091				 * Debug output disabled: "In sctp_handle_forward_tsn:
6092				 * something is wrong, the sizes of map and
6093				 * nr_map should be equal!"
6094				 */ ;
6095			}
6096		}
6097		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6098			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6099		}
6100		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
6101	} else {
6102		SCTP_TCB_LOCK_ASSERT(stcb);
6103		for (i = 0; i <= gap; i++) {
6104			SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6105		}
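		/*
		 * SCTP_SET_TSN_PRESENT marks one bit per TSN in the mapping
		 * array. For orientation, the classic bitmap idiom it
		 * corresponds to (a sketch, not necessarily the macro's
		 * literal definition) is:
		 *
		 *	array[gap >> 3] |= (1 << (gap & 0x07));
		 */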
6106		/*
6107		 * Now after marking all, slide things forward but do not
6108		 * send a sack yet.
6109		 */
6110		sctp_sack_check(stcb, 0, 0, abort_flag);
6111		if (*abort_flag)
6112			return;
6113	}
6114	/*************************************************************/
6115	/* 2. Clear up re-assembly queue                             */
6116	/*************************************************************/
6117	/*
6118	 * First service it if pd-api is up, just in case we can progress it
6119	 * forward
6120	 */
6121	if (asoc->fragmented_delivery_inprogress) {
6122		sctp_service_reassembly(stcb, asoc);
6123	}
6124	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6125		/* For each one on here see if we need to toss it */
6126		/*
6127		 * For now large messages held on the reasmqueue that are
6128		 * complete will be tossed too. We could in theory do more
6129		 * work to spin through and stop after dumping one msg aka
6130		 * seeing the start of a new msg at the head, and call the
6131		 * delivery function... to see if it can be delivered... But
6132		 * for now we just dump everything on the queue.
6133		 */
6134		chk = TAILQ_FIRST(&asoc->reasmqueue);
6135		while (chk) {
6136			at = TAILQ_NEXT(chk, sctp_next);
6137			if ((compare_with_wrap(new_cum_tsn,
6138			    chk->rec.data.TSN_seq, MAX_TSN)) ||
6139			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
6140				/* It needs to be tossed */
6141				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6142				if (compare_with_wrap(chk->rec.data.TSN_seq,
6143				    asoc->tsn_last_delivered, MAX_TSN)) {
6144					asoc->tsn_last_delivered =
6145					    chk->rec.data.TSN_seq;
6146					asoc->str_of_pdapi =
6147					    chk->rec.data.stream_number;
6148					asoc->ssn_of_pdapi =
6149					    chk->rec.data.stream_seq;
6150					asoc->fragment_flags =
6151					    chk->rec.data.rcv_flags;
6152				}
6153				asoc->size_on_reasm_queue -= chk->send_size;
6154				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6155				cnt_gone++;
6156
6157				/* Clear up any stream problem */
6158				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6159				    SCTP_DATA_UNORDERED &&
6160				    (compare_with_wrap(chk->rec.data.stream_seq,
6161				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6162				    MAX_SEQ))) {
6163					/*
6164					 * We must move this stream's
6165					 * sequence number forward if the
6166					 * chunk being skipped is not
6167					 * unordered. There is a chance
6168					 * that if the peer does not
6169					 * include the last fragment in
6170					 * its FWD-TSN we WILL have a
6171					 * problem, since a partial chunk
6172					 * may sit in the queue that is
6173					 * not deliverable. Also, if a
6174					 * partial delivery API has started,
6175					 * the user may get a partial chunk;
6176					 * the next read returns a new
6177					 * chunk... really ugly, but I see
6178					 * no way around it! Maybe a notify??
6179					 */
6180					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6181					    chk->rec.data.stream_seq;
6182				}
6183				if (chk->data) {
6184					sctp_m_freem(chk->data);
6185					chk->data = NULL;
6186				}
6187				sctp_free_a_chunk(stcb, chk);
6188			} else {
6189				/*
6190				 * Ok we have gone beyond the end of the
6191				 * fwd-tsn's mark. Some checks...
6192				 */
6193				if ((asoc->fragmented_delivery_inprogress) &&
6194				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
6195					uint32_t str_seq;
6196
6197					/*
6198					 * Special case: PD-API is up and
6199					 * what we fwd-tsn'd over includes
6200					 * one that had the LAST_FRAG. We no
6201					 * longer need to do the PD-API.
6202					 */
6203					asoc->fragmented_delivery_inprogress = 0;
6204
6205					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
6206					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6207					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
6208
6209				}
6210				break;
6211			}
6212			chk = at;
6213		}
6214	}
6215	if (asoc->fragmented_delivery_inprogress) {
6216		/*
6217		 * Ok we removed cnt_gone chunks in the PD-API queue that
6218		 * were being delivered. So now we must turn off the flag.
6219		 */
6220		uint32_t str_seq;
6221
6222		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
6223		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6224		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
6225		asoc->fragmented_delivery_inprogress = 0;
6226	}
6227	/*************************************************************/
6228	/* 3. Update the PR-stream re-ordering queues                */
6229	/*************************************************************/
6230	fwd_sz -= sizeof(*fwd);
6231	if (m && fwd_sz) {
6232		/* New method. */
6233		unsigned int num_str;
6234		struct sctp_strseq *stseq, strseqbuf;
6235
6236		offset += sizeof(*fwd);
6237
6238		num_str = fwd_sz / sizeof(struct sctp_strseq);
6239		for (i = 0; i < num_str; i++) {
6240			uint16_t st;
6241
6242			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6243			    sizeof(struct sctp_strseq),
6244			    (uint8_t *) & strseqbuf);
6245			offset += sizeof(struct sctp_strseq);
6246			if (stseq == NULL) {
6247				break;
6248			}
6249			/* Convert */
6250			st = ntohs(stseq->stream);
6251			stseq->stream = st;
6252			st = ntohs(stseq->sequence);
6253			stseq->sequence = st;
6254			/* now process */
6255			if (stseq->stream >= asoc->streamincnt) {
6256				/* screwed up streams, stop!  */
6257				break;
6258			}
6259			strm = &asoc->strmin[stseq->stream];
6260			if (compare_with_wrap(stseq->sequence,
6261			    strm->last_sequence_delivered, MAX_SEQ)) {
6262				/* Update the sequence number */
6263				strm->last_sequence_delivered =
6264				    stseq->sequence;
6265			}
6266			/* now kick the stream the new way */
6267			/* sa_ignore NO_NULL_CHK */
6268			sctp_kick_prsctp_reorder_queue(stcb, strm);
6269		}
6270	}
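	/*
	 * For reference, the FORWARD-TSN layout the loop above walks (see
	 * sctp_header.h for the authoritative definitions):
	 *
	 *	struct sctp_forward_tsn_chunk   chunk header plus
	 *	                                new_cumulative_tsn
	 *	struct sctp_strseq[n]           {stream, sequence} pairs,
	 *	                                n = fwd_sz / sizeof(struct sctp_strseq)
	 *
	 * Each pair reports the highest stream sequence number being
	 * skipped on that stream, which is why last_sequence_delivered is
	 * pushed forward before the reorder queue is kicked.
	 */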
6271	if (TAILQ_FIRST(&asoc->reasmqueue)) {
6272		/* now lets kick out and check for more fragmented delivery */
6273		/* sa_ignore NO_NULL_CHK */
6274		sctp_deliver_reasm_check(stcb, &stcb->asoc);
6275	}
6276}
6277
6278/* EY - fully identical to sctp_express_handle_sack, duplicated only for naming convention */
6279void
6280sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6281    uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6282{
6283	struct sctp_nets *net;
6284	struct sctp_association *asoc;
6285	struct sctp_tmit_chunk *tp1, *tp2;
6286	uint32_t old_rwnd;
6287	int win_probe_recovery = 0;
6288	int win_probe_recovered = 0;
6289	int j, done_once = 0;
6290
6291	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6292		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6293		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6294	}
6295	SCTP_TCB_LOCK_ASSERT(stcb);
6296#ifdef SCTP_ASOCLOG_OF_TSNS
6297	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6298	stcb->asoc.cumack_log_at++;
6299	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
6300		stcb->asoc.cumack_log_at = 0;
6301	}
6302#endif
6303	asoc = &stcb->asoc;
6304	old_rwnd = asoc->peers_rwnd;
6305	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6306		/* old ack */
6307		return;
6308	} else if (asoc->last_acked_seq == cumack) {
6309		/* Window update sack */
6310		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6311		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6312		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6313			/* SWS sender side engages */
6314			asoc->peers_rwnd = 0;
6315		}
6316		if (asoc->peers_rwnd > old_rwnd) {
6317			goto again;
6318		}
6319		return;
6320	}
6321	/* First setup for CC stuff */
6322	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6323		net->prev_cwnd = net->cwnd;
6324		net->net_ack = 0;
6325		net->net_ack2 = 0;
6326
6327		/*
6328		 * CMT: Reset CUC and Fast recovery algo variables before
6329		 * SACK processing
6330		 */
6331		net->new_pseudo_cumack = 0;
6332		net->will_exit_fast_recovery = 0;
6333	}
6334	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6335		uint32_t send_s;
6336
6337		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6338			tp1 = TAILQ_LAST(&asoc->sent_queue,
6339			    sctpchunk_listhead);
6340			send_s = tp1->rec.data.TSN_seq + 1;
6341		} else {
6342			send_s = asoc->sending_seq;
6343		}
6344		if ((cumack == send_s) ||
6345		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
6346#ifndef INVARIANTS
6347			struct mbuf *oper;
6348
6349#endif
6350#ifdef INVARIANTS
6351			panic("Impossible sack 1");
6352#else
6353			*abort_now = 1;
6354			/* XXX */
6355			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6356			    0, M_DONTWAIT, 1, MT_DATA);
6357			if (oper) {
6358				struct sctp_paramhdr *ph;
6359				uint32_t *ippp;
6360
6361				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6362				    sizeof(uint32_t);
6363				ph = mtod(oper, struct sctp_paramhdr *);
6364				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6365				ph->param_length = htons(SCTP_BUF_LEN(oper));
6366				ippp = (uint32_t *) (ph + 1);
6367				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6368			}
6369			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6370			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6371			return;
6372#endif
6373		}
6374	}
6375	asoc->this_sack_highest_gap = cumack;
6376	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6377		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6378		    stcb->asoc.overall_error_count,
6379		    0,
6380		    SCTP_FROM_SCTP_INDATA,
6381		    __LINE__);
6382	}
6383	stcb->asoc.overall_error_count = 0;
6384	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6385		/* process the new consecutive TSN first */
6386		tp1 = TAILQ_FIRST(&asoc->sent_queue);
6387		while (tp1) {
6388			tp2 = TAILQ_NEXT(tp1, sctp_next);
6389			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6390			    MAX_TSN) ||
6391			    cumack == tp1->rec.data.TSN_seq) {
6392				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6393					printf("Warning, an unsent is now acked?\n");
6394				}
6395				/*
6396				 * ECN Nonce: Add the nonce to the sender's
6397				 * nonce sum
6398				 */
6399				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6400				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6401					/*
6402					 * If it is less than ACKED, it is
6403					 * now no-longer in flight. Higher
6404					 * values may occur during marking
6405					 */
6406					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6407						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6408							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6409							    tp1->whoTo->flight_size,
6410							    tp1->book_size,
6411							    (uintptr_t) tp1->whoTo,
6412							    tp1->rec.data.TSN_seq);
6413						}
6414						sctp_flight_size_decrease(tp1);
6415						/* sa_ignore NO_NULL_CHK */
6416						sctp_total_flight_decrease(stcb, tp1);
6417					}
6418					tp1->whoTo->net_ack += tp1->send_size;
6419					if (tp1->snd_count < 2) {
6420						/*
6421						 * True non-retransmited
6422						 * True non-retransmitted
6423						 */
6424						tp1->whoTo->net_ack2 +=
6425						    tp1->send_size;
6426
6427						/* update RTO too? */
6428						if (tp1->do_rtt) {
							/* sa_ignore NO_NULL_CHK */
							tp1->whoTo->RTO =
							    sctp_calculate_rto(stcb,
							    asoc, tp1->whoTo,
							    &tp1->sent_rcv_time,
							    sctp_align_safe_nocopy);
6438							tp1->do_rtt = 0;
6439						}
6440					}
6441					/*
6442					 * CMT: CUCv2 algorithm. From the
6443					 * cumack'd TSNs, for each TSN being
6444					 * acked for the first time, set the
6445					 * following variables for the
6446					 * corresp destination.
6447					 * new_pseudo_cumack will trigger a
6448					 * cwnd update.
6449					 * find_(rtx_)pseudo_cumack will
6450					 * trigger search for the next
6451					 * expected (rtx-)pseudo-cumack.
6452					 */
6453					tp1->whoTo->new_pseudo_cumack = 1;
6454					tp1->whoTo->find_pseudo_cumack = 1;
6455					tp1->whoTo->find_rtx_pseudo_cumack = 1;
6456
6457					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6458						/* sa_ignore NO_NULL_CHK */
6459						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6460					}
6461				}
6462				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6463					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6464				}
6465				if (tp1->rec.data.chunk_was_revoked) {
6466					/* deflate the cwnd */
6467					tp1->whoTo->cwnd -= tp1->book_size;
6468					tp1->rec.data.chunk_was_revoked = 0;
6469				}
6470				tp1->sent = SCTP_DATAGRAM_ACKED;
6471				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6472				if (tp1->data) {
6473					/* sa_ignore NO_NULL_CHK */
6474					sctp_free_bufspace(stcb, asoc, tp1, 1);
6475					sctp_m_freem(tp1->data);
6476				}
6477				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6478					sctp_log_sack(asoc->last_acked_seq,
6479					    cumack,
6480					    tp1->rec.data.TSN_seq,
6481					    0,
6482					    0,
6483					    SCTP_LOG_FREE_SENT);
6484				}
6485				tp1->data = NULL;
6486				asoc->sent_queue_cnt--;
6487				sctp_free_a_chunk(stcb, tp1);
6488				tp1 = tp2;
6489			} else {
6490				break;
6491			}
6492		}
6493
6494	}
6495	/* sa_ignore NO_NULL_CHK */
6496	if (stcb->sctp_socket) {
6497#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6498		struct socket *so;
6499
6500#endif
6501
6502		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6503		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6504			/* sa_ignore NO_NULL_CHK */
6505			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6506		}
6507#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6508		so = SCTP_INP_SO(stcb->sctp_ep);
6509		atomic_add_int(&stcb->asoc.refcnt, 1);
6510		SCTP_TCB_UNLOCK(stcb);
6511		SCTP_SOCKET_LOCK(so, 1);
6512		SCTP_TCB_LOCK(stcb);
6513		atomic_subtract_int(&stcb->asoc.refcnt, 1);
6514		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6515			/* assoc was freed while we were unlocked */
6516			SCTP_SOCKET_UNLOCK(so, 1);
6517			return;
6518		}
6519#endif
6520		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6521#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6522		SCTP_SOCKET_UNLOCK(so, 1);
6523#endif
6524	} else {
6525		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6526			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6527		}
6528	}
6529
6530	/* JRS - Use the congestion control given in the CC module */
6531	if (asoc->last_acked_seq != cumack)
6532		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6533
6534	asoc->last_acked_seq = cumack;
6535
6536	if (TAILQ_EMPTY(&asoc->sent_queue)) {
6537		/* nothing left in-flight */
6538		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6539			net->flight_size = 0;
6540			net->partial_bytes_acked = 0;
6541		}
6542		asoc->total_flight = 0;
6543		asoc->total_flight_count = 0;
6544	}
	/* Fix up the advancedPeerAckPoint (a-p-a-p) for future PR-SCTP sends */
6546	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6547		asoc->advanced_peer_ack_point = cumack;
6548	}
6549	/* ECN Nonce updates */
6550	if (asoc->ecn_nonce_allowed) {
6551		if (asoc->nonce_sum_check) {
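			/*
			 * The peer's 1-bit nonce sum must match what we
			 * expect from the nonces we sent. On a mismatch we
			 * first wait for an ECNE (a CE mark legitimately
			 * destroys the nonce); if none arrives by
			 * nonce_wait_tsn, the peer is misbehaving.
			 */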
6552			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6553				if (asoc->nonce_wait_for_ecne == 0) {
6554					struct sctp_tmit_chunk *lchk;
6555
6556					lchk = TAILQ_FIRST(&asoc->send_queue);
6557					asoc->nonce_wait_for_ecne = 1;
6558					if (lchk) {
6559						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6560					} else {
6561						asoc->nonce_wait_tsn = asoc->sending_seq;
6562					}
6563				} else {
6564					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6565					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6566						/*
6567						 * Misbehaving peer. We need
6568						 * to react to this guy
6569						 */
6570						asoc->ecn_allowed = 0;
6571						asoc->ecn_nonce_allowed = 0;
6572					}
6573				}
6574			}
6575		} else {
6576			/* See if Resynchronization Possible */
6577			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6578				asoc->nonce_sum_check = 1;
6579				/*
6580				 * now we must calculate what the base is.
6581				 * We do this based on two things, we know
6582				 * the total's for all the segments
6583				 * gap-acked in the SACK (none), We also
6584				 * know the SACK's nonce sum, its in
6585				 * nonce_sum_flag. So we can build a truth
6586				 * table to back-calculate the new value of
6587				 * asoc->nonce_sum_expect_base:
6588				 *
6589				 * SACK-flag-Value         Seg-Sums Base 0 0 0
6590				 * 1                    0 1 0 1 1 1 1 0
6591				 */
6592				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
6593			}
6594		}
6595	}
6596	/* RWND update */
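	/*
	 * Usable peer rwnd = advertised rwnd - bytes in flight - a
	 * per-chunk overhead allowance (sctp_peer_chunk_oh) for each
	 * chunk still on the sent queue.
	 */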
6597	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6598	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6599	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6600		/* SWS sender side engages */
6601		asoc->peers_rwnd = 0;
6602	}
6603	if (asoc->peers_rwnd > old_rwnd) {
6604		win_probe_recovery = 1;
6605	}
	/* Now make sure a timer is running wherever data is still queued */
6607again:
6608	j = 0;
6609	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6610		int to_ticks;
6611
6612		if (win_probe_recovery && (net->window_probe)) {
6613			win_probe_recovered = 1;
6614			/*
6615			 * Find first chunk that was used with window probe
6616			 * and clear the sent
6617			 */
6618			/* sa_ignore FREED_MEMORY */
6619			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6620				if (tp1->window_probe) {
6621					/* move back to data send queue */
6622					sctp_window_probe_recovery(stcb, asoc, net, tp1);
6623					break;
6624				}
6625			}
6626		}
6627		if (net->RTO == 0) {
6628			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6629		} else {
6630			to_ticks = MSEC_TO_TICKS(net->RTO);
6631		}
6632		if (net->flight_size) {
6633
6634			j++;
6635			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6636			    sctp_timeout_handler, &net->rxt_timer);
6637			if (net->window_probe) {
6638				net->window_probe = 0;
6639			}
6640		} else {
6641			if (net->window_probe) {
6642				/*
6643				 * In window probes we must assure a timer
6644				 * is still running there
6645				 */
6646				net->window_probe = 0;
6647				(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6648				    sctp_timeout_handler, &net->rxt_timer);
6649			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6650				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6651				    stcb, net,
6652				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6653			}
6654			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6655				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6656					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6657					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6658					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
6659				}
6660			}
6661		}
6662	}
6663	if ((j == 0) &&
6664	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6665	    (asoc->sent_queue_retran_cnt == 0) &&
6666	    (win_probe_recovered == 0) &&
6667	    (done_once == 0)) {
6668		/*
6669		 * huh, this should not happen unless all packets are
6670		 * PR-SCTP and marked to skip of course.
6671		 */
6672		if (sctp_fs_audit(asoc)) {
6673			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6674				net->flight_size = 0;
6675			}
6676			asoc->total_flight = 0;
6677			asoc->total_flight_count = 0;
6678			asoc->sent_queue_retran_cnt = 0;
6679			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6680				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6681					sctp_flight_size_increase(tp1);
6682					sctp_total_flight_increase(stcb, tp1);
6683				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6684					asoc->sent_queue_retran_cnt++;
6685				}
6686			}
6687		}
6688		done_once = 1;
6689		goto again;
6690	}
6691	/**********************************/
6692	/* Now what about shutdown issues */
6693	/**********************************/
6694	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6695		/* nothing left on sendqueue.. consider done */
6696		/* clean up */
6697		if ((asoc->stream_queue_cnt == 1) &&
6698		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6699		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6700		    (asoc->locked_on_sending)
6701		    ) {
6702			struct sctp_stream_queue_pending *sp;
6703
6704			/*
6705			 * I may be in a state where we got all across.. but
6706			 * cannot write more due to a shutdown... we abort
6707			 * since the user did not indicate EOR in this case.
6708			 * The sp will be cleaned during free of the asoc.
6709			 */
6710			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6711			    sctp_streamhead);
6712			if ((sp) && (sp->length == 0)) {
6713				/* Let cleanup code purge it */
6714				if (sp->msg_is_complete) {
6715					asoc->stream_queue_cnt--;
6716				} else {
6717					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6718					asoc->locked_on_sending = NULL;
6719					asoc->stream_queue_cnt--;
6720				}
6721			}
6722		}
6723		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6724		    (asoc->stream_queue_cnt == 0)) {
6725			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6726				/* Need to abort here */
6727				struct mbuf *oper;
6728
6729		abort_out_now:
6730				*abort_now = 1;
6731				/* XXX */
6732				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6733				    0, M_DONTWAIT, 1, MT_DATA);
6734				if (oper) {
6735					struct sctp_paramhdr *ph;
6736					uint32_t *ippp;
6737
6738					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6739					    sizeof(uint32_t);
6740					ph = mtod(oper, struct sctp_paramhdr *);
6741					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6742					ph->param_length = htons(SCTP_BUF_LEN(oper));
6743					ippp = (uint32_t *) (ph + 1);
6744					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6745				}
6746				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6747				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
6748			} else {
6749				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6750				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6751					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6752				}
6753				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6754				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6755				sctp_stop_timers_for_shutdown(stcb);
6756				sctp_send_shutdown(stcb,
6757				    stcb->asoc.primary_destination);
6758				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6759				    stcb->sctp_ep, stcb, asoc->primary_destination);
6760				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6761				    stcb->sctp_ep, stcb, asoc->primary_destination);
6762			}
6763		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6764		    (asoc->stream_queue_cnt == 0)) {
6765			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6766				goto abort_out_now;
6767			}
6768			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6769			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6770			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6771			sctp_send_shutdown_ack(stcb,
6772			    stcb->asoc.primary_destination);
6773
6774			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6775			    stcb->sctp_ep, stcb, asoc->primary_destination);
6776		}
6777	}
6778	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6779		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6780		    rwnd,
6781		    stcb->asoc.peers_rwnd,
6782		    stcb->asoc.total_flight,
6783		    stcb->asoc.total_output_queue_size);
6784	}
6785}
6786
/* EY! nr_sack version of sctp_handle_segments; nr-gapped TSNs get removed from the RtxQ in this method */
6788static void
6789sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6790    struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6791    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6792    uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6793{
6794	/************************************************/
6795	/* process fragments and update sendqueue        */
6796	/************************************************/
6797	struct sctp_nr_sack *nr_sack;
6798	struct sctp_gap_ack_block *frag, block;
6799	struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6800	struct sctp_tmit_chunk *tp1;
6801	uint32_t i, j, all_bit;
6802	int wake_him = 0;
6803	uint32_t theTSN;
6804	int num_frs = 0;
6805
6806	uint16_t frag_strt, frag_end, primary_flag_set;
6807	uint16_t nr_frag_strt, nr_frag_end;
6808
6809	uint32_t last_frag_high;
6810	uint32_t last_nr_frag_high;
6811
6812	all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
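	/*
	 * With the ALL bit set, the gap-ack blocks in this NR-SACK double
	 * as the nr-gap-ack blocks, so no separate nr pass is needed.
	 */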
6813
6814	/*
6815	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6816	 */
6817	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6818		primary_flag_set = 1;
6819	} else {
6820		primary_flag_set = 0;
6821	}
6822	nr_sack = &ch->nr_sack;
6823
6824	/*
6825	 * EY! - I will process nr_gaps similarly,by going to this position
6826	 * again if All bit is set
6827	 */
6828	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6829	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6830	*offset += sizeof(block);
6831	if (frag == NULL) {
6832		return;
6833	}
6834	tp1 = NULL;
6835	last_frag_high = 0;
6836	for (i = 0; i < num_seg; i++) {
6837		frag_strt = ntohs(frag->start);
6838		frag_end = ntohs(frag->end);
		/* some sanity checks on the fragment offsets */
6840		if (frag_strt > frag_end) {
6841			/* this one is malformed, skip */
6842			frag++;
6843			continue;
6844		}
6845		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6846		    MAX_TSN))
6847			*biggest_tsn_acked = frag_end + last_tsn;
6848
6849		/* mark acked dgs and find out the highestTSN being acked */
6850		if (tp1 == NULL) {
6851			tp1 = TAILQ_FIRST(&asoc->sent_queue);
6852
6853			/* save the locations of the last frags */
6854			last_frag_high = frag_end + last_tsn;
6855		} else {
6856			/*
6857			 * now lets see if we need to reset the queue due to
6858			 * a out-of-order SACK fragment
6859			 */
6860			if (compare_with_wrap(frag_strt + last_tsn,
6861			    last_frag_high, MAX_TSN)) {
6862				/*
6863				 * if the new frag starts after the last TSN
6864				 * frag covered, we are ok and this one is
6865				 * beyond the last one
6866				 */
6867				;
6868			} else {
6869				/*
6870				 * ok, they have reset us, so we need to
6871				 * reset the queue this will cause extra
6872				 * hunting but hey, they chose the
6873				 * performance hit when they failed to order
6874				 * there gaps..
6875				 */
6876				tp1 = TAILQ_FIRST(&asoc->sent_queue);
6877			}
6878			last_frag_high = frag_end + last_tsn;
6879		}
6880		for (j = frag_strt; j <= frag_end; j++) {
6881			theTSN = j + last_tsn;
6882			while (tp1) {
6883				if (tp1->rec.data.doing_fast_retransmit)
6884					num_frs++;
6885
6886				/*
6887				 * CMT: CUCv2 algorithm. For each TSN being
6888				 * processed from the sent queue, track the
6889				 * next expected pseudo-cumack, or
6890				 * rtx_pseudo_cumack, if required. Separate
6891				 * cumack trackers for first transmissions,
6892				 * and retransmissions.
6893				 */
6894				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6895				    (tp1->snd_count == 1)) {
6896					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
6897					tp1->whoTo->find_pseudo_cumack = 0;
6898				}
6899				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6900				    (tp1->snd_count > 1)) {
6901					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
6902					tp1->whoTo->find_rtx_pseudo_cumack = 0;
6903				}
6904				if (tp1->rec.data.TSN_seq == theTSN) {
6905					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
6906						/*
6907						 * must be held until
6908						 * cum-ack passes
6909						 */
6910						/*
6911						 * ECN Nonce: Add the nonce
6912						 * value to the sender's
6913						 * nonce sum
6914						 */
6915						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6916							/*-
6917							 * If it is less than RESEND, it is
6918							 * now no-longer in flight.
6919							 * Higher values may already be set
6920							 * via previous Gap Ack Blocks...
6921							 * i.e. ACKED or RESEND.
6922							 */
6923							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6924							    *biggest_newly_acked_tsn, MAX_TSN)) {
6925								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
6926							}
6927							/*
6928							 * CMT: SFR algo
6929							 * (and HTNA) - set
6930							 * saw_newack to 1
6931							 * for dest being
6932							 * newly acked.
6933							 * update
6934							 * this_sack_highest_
6935							 * newack if
6936							 * appropriate.
6937							 */
6938							if (tp1->rec.data.chunk_was_revoked == 0)
6939								tp1->whoTo->saw_newack = 1;
6940
6941							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6942							    tp1->whoTo->this_sack_highest_newack,
6943							    MAX_TSN)) {
6944								tp1->whoTo->this_sack_highest_newack =
6945								    tp1->rec.data.TSN_seq;
6946							}
6947							/*
6948							 * CMT DAC algo:
6949							 * also update
6950							 * this_sack_lowest_n
6951							 * ewack
6952							 */
6953							if (*this_sack_lowest_newack == 0) {
6954								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6955									sctp_log_sack(*this_sack_lowest_newack,
6956									    last_tsn,
6957									    tp1->rec.data.TSN_seq,
6958									    0,
6959									    0,
6960									    SCTP_LOG_TSN_ACKED);
6961								}
6962								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
6963							}
6964							/*
6965							 * CMT: CUCv2
6966							 * algorithm. If
6967							 * (rtx-)pseudo-cumac
6968							 * k for corresp
6969							 * dest is being
6970							 * acked, then we
6971							 * have a new
6972							 * (rtx-)pseudo-cumac
6973							 * k. Set
6974							 * new_(rtx_)pseudo_c
6975							 * umack to TRUE so
6976							 * that the cwnd for
6977							 * this dest can be
6978							 * updated. Also
6979							 * trigger search
6980							 * for the next
6981							 * expected
6982							 * (rtx-)pseudo-cumac
6983							 * k. Separate
6984							 * pseudo_cumack
6985							 * trackers for
6986							 * first
6987							 * transmissions and
6988							 * retransmissions.
6989							 */
6990							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
6991								if (tp1->rec.data.chunk_was_revoked == 0) {
6992									tp1->whoTo->new_pseudo_cumack = 1;
6993								}
6994								tp1->whoTo->find_pseudo_cumack = 1;
6995							}
6996							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6997								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6998							}
6999							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
7000								if (tp1->rec.data.chunk_was_revoked == 0) {
7001									tp1->whoTo->new_pseudo_cumack = 1;
7002								}
7003								tp1->whoTo->find_rtx_pseudo_cumack = 1;
7004							}
7005							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7006								sctp_log_sack(*biggest_newly_acked_tsn,
7007								    last_tsn,
7008								    tp1->rec.data.TSN_seq,
7009								    frag_strt,
7010								    frag_end,
7011								    SCTP_LOG_TSN_ACKED);
7012							}
7013							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7014								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
7015								    tp1->whoTo->flight_size,
7016								    tp1->book_size,
7017								    (uintptr_t) tp1->whoTo,
7018								    tp1->rec.data.TSN_seq);
7019							}
7020							sctp_flight_size_decrease(tp1);
7021							sctp_total_flight_decrease(stcb, tp1);
7022
7023							tp1->whoTo->net_ack += tp1->send_size;
7024							if (tp1->snd_count < 2) {
7025								/*
7026								 * True
7027								 * non-retran
7028								 * smited
7029								 * chunk
7030								 */
7031								tp1->whoTo->net_ack2 += tp1->send_size;
7032
7033								/*
7034								 * update
7035								 * RTO too ?
7036								 */
7037								if (tp1->do_rtt) {
7038									tp1->whoTo->RTO =
7039									    sctp_calculate_rto(stcb,
7040									    asoc,
7041									    tp1->whoTo,
7042									    &tp1->sent_rcv_time,
7043									    sctp_align_safe_nocopy);
7044									tp1->do_rtt = 0;
7045								}
7046							}
7047						}
7048						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
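							/*
							 * Fold this chunk's
							 * ECN nonce into the
							 * 1-bit running sum
							 * for the gap-acked
							 * segments.
							 */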
7049							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
7050							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
7051							if (compare_with_wrap(tp1->rec.data.TSN_seq,
7052							    asoc->this_sack_highest_gap,
7053							    MAX_TSN)) {
7054								asoc->this_sack_highest_gap =
7055								    tp1->rec.data.TSN_seq;
7056							}
7057							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7058								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7059#ifdef SCTP_AUDITING_ENABLED
7060								sctp_audit_log(0xB2,
7061								    (asoc->sent_queue_retran_cnt & 0x000000ff));
7062#endif
7063							}
7064						}
7065						/*
7066						 * All chunks NOT UNSENT
7067						 * fall through here and are
7068						 * marked
7069						 */
7070						tp1->sent = SCTP_DATAGRAM_MARKED;
7071						if (tp1->rec.data.chunk_was_revoked) {
7072							/* deflate the cwnd */
7073							tp1->whoTo->cwnd -= tp1->book_size;
7074							tp1->rec.data.chunk_was_revoked = 0;
7075						}
7076						/*
7077						 * EY - if all bit is set
7078						 * then this TSN is
7079						 * nr_marked
7080						 */
7081						if (all_bit) {
7082							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7083							/*
7084							 * TAILQ_REMOVE(&asoc
7085							 * ->sent_queue,
7086							 * tp1, sctp_next);
7087							 */
7088							if (tp1->data) {
7089								/*
7090								 * sa_ignore
7091								 * NO_NULL_CH
7092								 * K
7093								 */
7094								sctp_free_bufspace(stcb, asoc, tp1, 1);
7095								sctp_m_freem(tp1->data);
7096							}
7097							tp1->data = NULL;
7098							/*
7099							 * asoc->sent_queue_c
7100							 * nt--;
7101							 */
7102							/*
7103							 * sctp_free_a_chunk(
7104							 * stcb, tp1);
7105							 */
7106							wake_him++;
7107						}
7108					}
7109					break;
7110				}	/* if (tp1->TSN_seq == theTSN) */
7111				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
7112				    MAX_TSN))
7113					break;
7114
7115				tp1 = TAILQ_NEXT(tp1, sctp_next);
7116			}	/* end while (tp1) */
7117		}		/* end for (j = fragStart */
7118		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
7119		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
7120		*offset += sizeof(block);
7121		if (frag == NULL) {
7122			break;
7123		}
7124	}
7125
7126	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
7127		if (num_frs)
7128			sctp_log_fr(*biggest_tsn_acked,
7129			    *biggest_newly_acked_tsn,
7130			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
7131	}
7132	/*
7133	 * EY - if all bit is not set then there should be other loops to
7134	 * identify nr TSNs
7135	 */
7136	if (!all_bit) {
7137
7138		nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7139		    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7140		*offset += sizeof(nr_block);
7144		if (nr_frag == NULL) {
7145			return;
7146		}
7147		tp1 = NULL;
7148		last_nr_frag_high = 0;
7149
7150		for (i = 0; i < num_nr_seg; i++) {
7151
7152			nr_frag_strt = ntohs(nr_frag->start);
7153			nr_frag_end = ntohs(nr_frag->end);
7154
			/* some sanity checks on the nr fragment offsets */
7156			if (nr_frag_strt > nr_frag_end) {
7157				/* this one is malformed, skip */
7158				nr_frag++;
7159				continue;
7160			}
7161			/*
7162			 * mark acked dgs and find out the highestTSN being
7163			 * acked
7164			 */
7165			if (tp1 == NULL) {
7166				tp1 = TAILQ_FIRST(&asoc->sent_queue);
7167
7168				/* save the locations of the last frags */
7169				last_nr_frag_high = nr_frag_end + last_tsn;
7170			} else {
7171				/*
7172				 * now lets see if we need to reset the
7173				 * queue due to a out-of-order SACK fragment
7174				 */
7175				if (compare_with_wrap(nr_frag_strt + last_tsn,
7176				    last_nr_frag_high, MAX_TSN)) {
7177					/*
7178					 * if the new frag starts after the
7179					 * last TSN frag covered, we are ok
7180					 * and this one is beyond the last
7181					 * one
7182					 */
7183					;
7184				} else {
7185					/*
7186					 * ok, they have reset us, so we
7187					 * need to reset the queue this will
7188					 * cause extra hunting but hey, they
7189					 * chose the performance hit when
7190					 * they failed to order there gaps..
7191					 */
7192					tp1 = TAILQ_FIRST(&asoc->sent_queue);
7193				}
7194				last_nr_frag_high = nr_frag_end + last_tsn;
7195			}
7196
7197			for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
7198				while (tp1) {
7199					if (tp1->rec.data.TSN_seq == j) {
7200						if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7201							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7202							/*
7203							 * TAILQ_REMOVE(&asoc
7204							 * ->sent_queue,
7205							 * tp1, sctp_next);
7206							 */
7207							if (tp1->data) {
7208								/*
7209								 * sa_ignore
7210								 * NO_NULL_CH
7211								 * K
7212								 */
7213								sctp_free_bufspace(stcb, asoc, tp1, 1);
7214								sctp_m_freem(tp1->data);
7215							}
7216							tp1->data = NULL;
7217							/*
7218							 * asoc->sent_queue_c
7219							 * nt--;
7220							 */
7221							/*
7222							 * sctp_free_a_chunk(
7223							 * stcb, tp1);
7224							 */
7225							wake_him++;
7226						}
7227						break;
7228					}	/* if (tp1->TSN_seq == j) */
7229					if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
7230					    MAX_TSN))
7231						break;
7232					tp1 = TAILQ_NEXT(tp1, sctp_next);
7233				}	/* end while (tp1) */
7234
7235			}	/* end for (j = nrFragStart */
7236
7237			nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7238			    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7239			*offset += sizeof(nr_block);
7240			if (nr_frag == NULL) {
7241				break;
7242			}
7243		}		/* end of if(!all_bit) */
7244	}
7245	/*
7246	 * EY- wake up the socket if things have been removed from the sent
7247	 * queue
7248	 */
7249	if ((wake_him) && (stcb->sctp_socket)) {
7250#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7251		struct socket *so;
7252
7253#endif
7254		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7255		/*
7256		 * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
7257		 * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
7258		 * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
7259		 */
7260#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7261		so = SCTP_INP_SO(stcb->sctp_ep);
7262		atomic_add_int(&stcb->asoc.refcnt, 1);
7263		SCTP_TCB_UNLOCK(stcb);
7264		SCTP_SOCKET_LOCK(so, 1);
7265		SCTP_TCB_LOCK(stcb);
7266		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7267		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7268			/* assoc was freed while we were unlocked */
7269			SCTP_SOCKET_UNLOCK(so, 1);
7270			return;
7271		}
7272#endif
7273		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7274#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7275		SCTP_SOCKET_UNLOCK(so, 1);
7276#endif
7277	}			/* else { if
7278				 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7279				 * SCTP_WAKE_LOGGING_ENABLE) {
7280				 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7281				 * SCTP_NOWAKE_FROM_SACK); } } */
7282}
7283
7284/* EY- nr_sack */
/* Identifies the non-renegable TSNs that are revoked */
7286static void
7287sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7288    struct sctp_association *asoc, uint32_t cumack,
7289    u_long biggest_tsn_acked)
7290{
7291	struct sctp_tmit_chunk *tp1;
7292
7293	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7294	while (tp1) {
7295		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7296		    MAX_TSN)) {
7297			/*
7298			 * ok this guy is either ACK or MARKED. If it is
7299			 * ACKED it has been previously acked but not this
7300			 * time i.e. revoked.  If it is MARKED it was ACK'ed
7301			 * again.
7302			 */
7303			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
7304			    MAX_TSN))
7305				break;
7306
7307
7308			if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7309				/*
7310				 * EY! a non-renegable TSN is revoked, need
7311				 * to abort the association
7312				 */
7313				/*
7314				 * EY TODO: put in the code to abort the
7315				 * assoc.
7316				 */
7317				return;
7318			} else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7319				/* it has been re-acked in this SACK */
7320				tp1->sent = SCTP_DATAGRAM_NR_ACKED;
7321			}
7322		}
7323		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
7324			break;
7325		tp1 = TAILQ_NEXT(tp1, sctp_next);
7326	}
7327}
7328
/* EY! nr_sack version of sctp_handle_sack; nr_gap_ack processing should be added to this method */
7330void
7331sctp_handle_nr_sack(struct mbuf *m, int offset,
7332    struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7333    struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7334{
7335	struct sctp_association *asoc;
7336
7337	/* EY sack */
7338	struct sctp_nr_sack *nr_sack;
7339	struct sctp_tmit_chunk *tp1, *tp2;
7340	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7341	         this_sack_lowest_newack;
7342	uint32_t sav_cum_ack;
7343
7344	/* EY num_seg */
7345	uint16_t num_seg, num_nr_seg, num_dup;
7346	uint16_t wake_him = 0;
7347	unsigned int nr_sack_length;
7348	uint32_t send_s = 0;
7349	long j;
7350	int accum_moved = 0;
7351	int will_exit_fast_recovery = 0;
7352	uint32_t a_rwnd, old_rwnd;
7353	int win_probe_recovery = 0;
7354	int win_probe_recovered = 0;
7355	struct sctp_nets *net = NULL;
7356	int nonce_sum_flag, ecn_seg_sums = 0, all_bit;
7357	int done_once;
7358	uint8_t reneged_all = 0;
7359	uint8_t cmt_dac_flag;
7360
7361	/*
7362	 * we take any chance we can to service our queues since we cannot
7363	 * get awoken when the socket is read from :<
7364	 */
7365	/*
7366	 * Now perform the actual SACK handling: 1) Verify that it is not an
7367	 * old sack, if so discard. 2) If there is nothing left in the send
7368	 * queue (cum-ack is equal to last acked) then you have a duplicate
7369	 * too, update any rwnd change and verify no timers are running.
7370	 * then return. 3) Process any new consequtive data i.e. cum-ack
7371	 * moved process these first and note that it moved. 4) Process any
7372	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
7373	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
7374	 * sync up flightsizes and things, stop all timers and also check
7375	 * for shutdown_pending state. If so then go ahead and send off the
7376	 * shutdown. If in shutdown recv, send off the shutdown-ack and
7377	 * start that timer, Ret. 9) Strike any non-acked things and do FR
7378	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
7379	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
7380	 * if in shutdown_recv state.
7381	 */
7382	SCTP_TCB_LOCK_ASSERT(stcb);
7383	nr_sack = &ch->nr_sack;
7384	/* CMT DAC algo */
7385	this_sack_lowest_newack = 0;
7386	j = 0;
7387	nr_sack_length = (unsigned int)nr_sack_len;
7388	/* ECN Nonce */
7389	SCTP_STAT_INCR(sctps_slowpath_sack);
7390	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7391	cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7392#ifdef SCTP_ASOCLOG_OF_TSNS
7393	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7394	stcb->asoc.cumack_log_at++;
7395	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
7396		stcb->asoc.cumack_log_at = 0;
7397	}
7398#endif
7399	all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
7400	num_seg = ntohs(nr_sack->num_gap_ack_blks);
7401	num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
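	/*
	 * With the ALL bit set only nr-gap-ack blocks are present and they
	 * stand in for the regular gap-ack blocks as well.
	 */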
7402	if (all_bit)
7403		num_seg = num_nr_seg;
7404	a_rwnd = rwnd;
7405
7406	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7407		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7408		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7409	}
7410	/* CMT DAC algo */
7411	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7412	num_dup = ntohs(nr_sack->num_dup_tsns);
7413
7414	old_rwnd = stcb->asoc.peers_rwnd;
7415	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7416		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7417		    stcb->asoc.overall_error_count,
7418		    0,
7419		    SCTP_FROM_SCTP_INDATA,
7420		    __LINE__);
7421	}
7422	stcb->asoc.overall_error_count = 0;
7423	asoc = &stcb->asoc;
7424	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7425		sctp_log_sack(asoc->last_acked_seq,
7426		    cum_ack,
7427		    0,
7428		    num_seg,
7429		    num_dup,
7430		    SCTP_LOG_NEW_SACK);
7431	}
7432	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7433		int off_to_dup, iii;
7434		uint32_t *dupdata, dblock;
7435
7436		/* EY! gotta be careful here */
7437		if (all_bit) {
7438			off_to_dup = (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) +
7439			    sizeof(struct sctp_nr_sack_chunk);
7440		} else {
7441			off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7442			    (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7443		}
7444		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7445			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7446			    sizeof(uint32_t), (uint8_t *) & dblock);
7447			off_to_dup += sizeof(uint32_t);
7448			if (dupdata) {
7449				for (iii = 0; iii < num_dup; iii++) {
7450					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7451					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7452					    sizeof(uint32_t), (uint8_t *) & dblock);
7453					if (dupdata == NULL)
7454						break;
7455					off_to_dup += sizeof(uint32_t);
7456				}
7457			}
7458		} else {
7459			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7460			    off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7461		}
7462	}
7463	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7464		/* reality check */
7465		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7466			tp1 = TAILQ_LAST(&asoc->sent_queue,
7467			    sctpchunk_listhead);
7468			send_s = tp1->rec.data.TSN_seq + 1;
7469		} else {
7470			send_s = asoc->sending_seq;
7471		}
7472		if (cum_ack == send_s ||
7473		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7474#ifndef INVARIANTS
7475			struct mbuf *oper;
7476
7477#endif
7478#ifdef INVARIANTS
7479	hopeless_peer:
7480			panic("Impossible sack 1");
7481#else
7482
7483
7484			/*
7485			 * no way, we have not even sent this TSN out yet.
7486			 * Peer is hopelessly messed up with us.
7487			 */
7488	hopeless_peer:
7489			*abort_now = 1;
7490			/* XXX */
7491			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7492			    0, M_DONTWAIT, 1, MT_DATA);
7493			if (oper) {
7494				struct sctp_paramhdr *ph;
7495				uint32_t *ippp;
7496
7497				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7498				    sizeof(uint32_t);
7499				ph = mtod(oper, struct sctp_paramhdr *);
7500				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7501				ph->param_length = htons(SCTP_BUF_LEN(oper));
7502				ippp = (uint32_t *) (ph + 1);
7503				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7504			}
7505			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7506			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7507			return;
7508#endif
7509		}
7510	}
7511	/**********************/
7512	/* 1) check the range */
7513	/**********************/
7514	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7515		/* acking something behind */
7516		return;
7517	}
7518	sav_cum_ack = asoc->last_acked_seq;
7519
7520	/* update the Rwnd of the peer */
7521	if (TAILQ_EMPTY(&asoc->sent_queue) &&
7522	    TAILQ_EMPTY(&asoc->send_queue) &&
7523	    (asoc->stream_queue_cnt == 0)
7524	    ) {
7525		/* nothing left on send/sent and strmq */
7526		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7527			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7528			    asoc->peers_rwnd, 0, 0, a_rwnd);
7529		}
7530		asoc->peers_rwnd = a_rwnd;
7531		if (asoc->sent_queue_retran_cnt) {
7532			asoc->sent_queue_retran_cnt = 0;
7533		}
7534		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7535			/* SWS sender side engages */
7536			asoc->peers_rwnd = 0;
7537		}
7538		/* stop any timers */
7539		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7540			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7541			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7542			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7543				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7544					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7545					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7546					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7547				}
7548			}
7549			net->partial_bytes_acked = 0;
7550			net->flight_size = 0;
7551		}
7552		asoc->total_flight = 0;
7553		asoc->total_flight_count = 0;
7554		return;
7555	}
7556	/*
7557	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
7558	 * things. The total byte count acked is tracked in netAckSz AND
7559	 * netAck2 is used to track the total bytes acked that are un-
7560	 * amibguious and were never retransmitted. We track these on a per
7561	 * destination address basis.
7562	 */
7563	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7564		net->prev_cwnd = net->cwnd;
7565		net->net_ack = 0;
7566		net->net_ack2 = 0;
7567
7568		/*
7569		 * CMT: Reset CUC and Fast recovery algo variables before
7570		 * SACK processing
7571		 */
7572		net->new_pseudo_cumack = 0;
7573		net->will_exit_fast_recovery = 0;
7574	}
7575	/* process the new consecutive TSN first */
7576	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7577	while (tp1) {
7578		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7579		    MAX_TSN) ||
7580		    last_tsn == tp1->rec.data.TSN_seq) {
7581			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7582				/*
7583				 * ECN Nonce: Add the nonce to the sender's
7584				 * nonce sum
7585				 */
7586				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7587				accum_moved = 1;
7588				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7589					/*
7590					 * If it is less than ACKED, it is
7591					 * now no-longer in flight. Higher
7592					 * values may occur during marking
7593					 */
7594					if ((tp1->whoTo->dest_state &
7595					    SCTP_ADDR_UNCONFIRMED) &&
7596					    (tp1->snd_count < 2)) {
7597						/*
7598						 * If there was no retran
7599						 * and the address is
7600						 * un-confirmed and we sent
7601						 * there and are now
7602						 * sacked.. its confirmed,
7603						 * mark it so.
7604						 */
7605						tp1->whoTo->dest_state &=
7606						    ~SCTP_ADDR_UNCONFIRMED;
7607					}
7608					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7609						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7610							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7611							    tp1->whoTo->flight_size,
7612							    tp1->book_size,
7613							    (uintptr_t) tp1->whoTo,
7614							    tp1->rec.data.TSN_seq);
7615						}
7616						sctp_flight_size_decrease(tp1);
7617						sctp_total_flight_decrease(stcb, tp1);
7618					}
7619					tp1->whoTo->net_ack += tp1->send_size;
7620
7621					/* CMT SFR and DAC algos */
7622					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7623					tp1->whoTo->saw_newack = 1;
7624
7625					if (tp1->snd_count < 2) {
7626						/*
7627						 * True non-retransmited
7628						 * chunk
7629						 */
7630						tp1->whoTo->net_ack2 +=
7631						    tp1->send_size;
7632
7633						/* update RTO too? */
7634						if (tp1->do_rtt) {
7635							tp1->whoTo->RTO =
7636							    sctp_calculate_rto(stcb,
7637							    asoc, tp1->whoTo,
7638							    &tp1->sent_rcv_time,
7639							    sctp_align_safe_nocopy);
7640							tp1->do_rtt = 0;
7641						}
7642					}
7643					/*
7644					 * CMT: CUCv2 algorithm. From the
7645					 * cumack'd TSNs, for each TSN being
7646					 * acked for the first time, set the
7647					 * following variables for the
7648					 * corresp destination.
7649					 * new_pseudo_cumack will trigger a
7650					 * cwnd update.
7651					 * find_(rtx_)pseudo_cumack will
7652					 * trigger search for the next
7653					 * expected (rtx-)pseudo-cumack.
7654					 */
7655					tp1->whoTo->new_pseudo_cumack = 1;
7656					tp1->whoTo->find_pseudo_cumack = 1;
7657					tp1->whoTo->find_rtx_pseudo_cumack = 1;
7658
7659
7660					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7661						sctp_log_sack(asoc->last_acked_seq,
7662						    cum_ack,
7663						    tp1->rec.data.TSN_seq,
7664						    0,
7665						    0,
7666						    SCTP_LOG_TSN_ACKED);
7667					}
7668					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7669						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7670					}
7671				}
7672				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7673					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7674#ifdef SCTP_AUDITING_ENABLED
7675					sctp_audit_log(0xB3,
7676					    (asoc->sent_queue_retran_cnt & 0x000000ff));
7677#endif
7678				}
7679				if (tp1->rec.data.chunk_was_revoked) {
7680					/* deflate the cwnd */
7681					tp1->whoTo->cwnd -= tp1->book_size;
7682					tp1->rec.data.chunk_was_revoked = 0;
7683				}
7684				tp1->sent = SCTP_DATAGRAM_ACKED;
7685			}
7686		} else {
7687			break;
7688		}
7689		tp1 = TAILQ_NEXT(tp1, sctp_next);
7690	}
7691	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7692	/* always set this up to cum-ack */
7693	asoc->this_sack_highest_gap = last_tsn;
7694
7695	/* Move offset up to point to gaps/dups */
7696	offset += sizeof(struct sctp_nr_sack_chunk);
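	/*
	 * Make sure the advertised gap-ack blocks actually fit inside the
	 * received chunk before walking them.
	 */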
7697	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7698
7699		/* skip corrupt segments */
7700		goto skip_segments;
7701	}
7702	if (num_seg > 0) {
7703
7704		/*
7705		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7706		 * to be greater than the cumack. Also reset saw_newack to 0
7707		 * for all dests.
7708		 */
7709		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7710			net->saw_newack = 0;
7711			net->this_sack_highest_newack = last_tsn;
7712		}
7713
7714		/*
7715		 * thisSackHighestGap will increase while handling NEW
7716		 * segments this_sack_highest_newack will increase while
7717		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7718		 * used for CMT DAC algo. saw_newack will also change.
7719		 */
7720
7721		sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7722		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7723		    num_seg, num_nr_seg, &ecn_seg_sums);
7724
7725
7726		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7727			/*
7728			 * validate the biggest_tsn_acked in the gap acks if
7729			 * strict adherence is wanted.
7730			 */
7731			if ((biggest_tsn_acked == send_s) ||
7732			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7733				/*
7734				 * peer is either confused or we are under
7735				 * attack. We must abort.
7736				 */
7737				goto hopeless_peer;
7738			}
7739		}
7740	}
7741skip_segments:
7742	/*******************************************/
7743	/* cancel ALL T3-send timer if accum moved */
7744	/*******************************************/
7745	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7746		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7747			if (net->new_pseudo_cumack)
7748				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7749				    stcb, net,
7750				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7751
7752		}
7753	} else {
7754		if (accum_moved) {
7755			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7756				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7757				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7758			}
7759		}
7760	}
7761	/********************************************/
7762	/* drop the acked chunks from the sendqueue */
7763	/********************************************/
7764	asoc->last_acked_seq = cum_ack;
7765
7766	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7767	if (tp1 == NULL)
7768		goto done_with_it;
7769	do {
7770		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7771		    MAX_TSN)) {
7772			break;
7773		}
7774		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7775			/* no more sent on list */
7776			printf("Warning, tp1->sent == %d and its now acked?\n",
7777			    tp1->sent);
7778		}
7779		tp2 = TAILQ_NEXT(tp1, sctp_next);
7780		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7781		if (tp1->pr_sctp_on) {
7782			if (asoc->pr_sctp_cnt != 0)
7783				asoc->pr_sctp_cnt--;
7784		}
7785		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7786		    (asoc->total_flight > 0)) {
7787#ifdef INVARIANTS
7788			panic("Warning flight size is postive and should be 0");
7789#else
7790			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
7791			    asoc->total_flight);
7792#endif
7793			asoc->total_flight = 0;
7794		}
7795		if (tp1->data) {
7796			/* sa_ignore NO_NULL_CHK */
7797			sctp_free_bufspace(stcb, asoc, tp1, 1);
7798			sctp_m_freem(tp1->data);
7799			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
7800				asoc->sent_queue_cnt_removeable--;
7801			}
7802		}
7803		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7804			sctp_log_sack(asoc->last_acked_seq,
7805			    cum_ack,
7806			    tp1->rec.data.TSN_seq,
7807			    0,
7808			    0,
7809			    SCTP_LOG_FREE_SENT);
7810		}
7811		tp1->data = NULL;
7812		asoc->sent_queue_cnt--;
7813		sctp_free_a_chunk(stcb, tp1);
7814		wake_him++;
7815		tp1 = tp2;
7816	} while (tp1 != NULL);
7817
7818done_with_it:
7819	/* sa_ignore NO_NULL_CHK */
7820	if ((wake_him) && (stcb->sctp_socket)) {
7821#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7822		struct socket *so;
7823
7824#endif
7825		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7826		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7827			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7828		}
7829#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7830		so = SCTP_INP_SO(stcb->sctp_ep);
7831		atomic_add_int(&stcb->asoc.refcnt, 1);
7832		SCTP_TCB_UNLOCK(stcb);
7833		SCTP_SOCKET_LOCK(so, 1);
7834		SCTP_TCB_LOCK(stcb);
7835		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7836		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7837			/* assoc was freed while we were unlocked */
7838			SCTP_SOCKET_UNLOCK(so, 1);
7839			return;
7840		}
7841#endif
7842		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7843#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7844		SCTP_SOCKET_UNLOCK(so, 1);
7845#endif
7846	} else {
7847		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7848			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7849		}
7850	}
7851
7852	if (asoc->fast_retran_loss_recovery && accum_moved) {
7853		if (compare_with_wrap(asoc->last_acked_seq,
7854		    asoc->fast_recovery_tsn, MAX_TSN) ||
7855		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7856			/* Setup so we will exit RFC2582 fast recovery */
7857			will_exit_fast_recovery = 1;
7858		}
7859	}
7860	/*
7861	 * Check for revoked fragments:
7862	 *
7863	 * if Previous sack - Had no frags then we can't have any revoked if
7864	 * Previous sack - Had frag's then - If we now have frags aka
7865	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
7866	 * some of them. else - The peer revoked all ACKED fragments, since
7867	 * we had some before and now we have NONE.
7868	 */
7869
7870	if (num_seg)
7871		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7872
7873	else if (asoc->saw_sack_with_frags) {
7874		int cnt_revoked = 0;
7875
7876		tp1 = TAILQ_FIRST(&asoc->sent_queue);
7877		if (tp1 != NULL) {
7878			/* Peer revoked all dg's marked or acked */
7879			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7880				/*
7881				 * EY- maybe check only if it is nr_acked
7882				 * nr_marked may not be possible
7883				 */
7884				if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7885				    (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7886					/*
7887					 * EY! - TODO: Something previously
7888					 * nr_gapped is reneged, abort the
7889					 * association
7890					 */
7891					return;
7892				}
7893				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7894				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7895					tp1->sent = SCTP_DATAGRAM_SENT;
7896					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7897						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7898						    tp1->whoTo->flight_size,
7899						    tp1->book_size,
7900						    (uintptr_t) tp1->whoTo,
7901						    tp1->rec.data.TSN_seq);
7902					}
7903					sctp_flight_size_increase(tp1);
7904					sctp_total_flight_increase(stcb, tp1);
7905					tp1->rec.data.chunk_was_revoked = 1;
7906					/*
7907					 * To ensure that this increase in
7908					 * flightsize, which is artificial,
7909					 * does not throttle the sender, we
7910					 * also increase the cwnd
7911					 * artificially.
7912					 */
7913					tp1->whoTo->cwnd += tp1->book_size;
7914					cnt_revoked++;
7915				}
7916			}
7917			if (cnt_revoked) {
7918				reneged_all = 1;
7919			}
7920		}
7921		asoc->saw_sack_with_frags = 0;
7922	}
7923	if (num_seg)
7924		asoc->saw_sack_with_frags = 1;
7925	else
7926		asoc->saw_sack_with_frags = 0;
7927
7928	/* EY! - not sure about if there should be an IF */
7929	if (num_nr_seg)
7930		sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7931	else if (asoc->saw_sack_with_nr_frags) {
7932		/*
7933		 * EY!- TODO: all previously nr_gapped chunks have been
7934		 * reneged abort the association
7935		 */
7936		asoc->saw_sack_with_nr_frags = 0;
7937	}
7938	if (num_nr_seg)
7939		asoc->saw_sack_with_nr_frags = 1;
7940	else
7941		asoc->saw_sack_with_nr_frags = 0;
7942	/* JRS - Use the congestion control given in the CC module */
7943	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7944
7945	if (TAILQ_EMPTY(&asoc->sent_queue)) {
7946		/* nothing left in-flight */
7947		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7948			/* stop all timers */
7949			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7950				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7951					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7952					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7953					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
7954				}
7955			}
7956			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7957			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
7958			net->flight_size = 0;
7959			net->partial_bytes_acked = 0;
7960		}
7961		asoc->total_flight = 0;
7962		asoc->total_flight_count = 0;
7963	}
7964	/**********************************/
7965	/* Now what about shutdown issues */
7966	/**********************************/
7967	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
7968		/* nothing left on sendqueue.. consider done */
7969		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7970			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7971			    asoc->peers_rwnd, 0, 0, a_rwnd);
7972		}
7973		asoc->peers_rwnd = a_rwnd;
7974		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7975			/* SWS sender side engages */
7976			asoc->peers_rwnd = 0;
7977		}
7978		/* clean up */
7979		if ((asoc->stream_queue_cnt == 1) &&
7980		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7981		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
7982		    (asoc->locked_on_sending)
7983		    ) {
7984			struct sctp_stream_queue_pending *sp;
7985
7986			/*
7987			 * I may be in a state where we got all across.. but
7988			 * cannot write more due to a shutdown... we abort
7989			 * since the user did not indicate EOR in this case.
7990			 */
7991			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
7992			    sctp_streamhead);
7993			if ((sp) && (sp->length == 0)) {
7994				asoc->locked_on_sending = NULL;
7995				if (sp->msg_is_complete) {
7996					asoc->stream_queue_cnt--;
7997				} else {
7998					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7999					asoc->stream_queue_cnt--;
8000				}
8001			}
8002		}
8003		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
8004		    (asoc->stream_queue_cnt == 0)) {
8005			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8006				/* Need to abort here */
8007				struct mbuf *oper;
8008
8009		abort_out_now:
8010				*abort_now = 1;
8011				/* XXX */
8012				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
8013				    0, M_DONTWAIT, 1, MT_DATA);
8014				if (oper) {
8015					struct sctp_paramhdr *ph;
8016					uint32_t *ippp;
8017
8018					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
8019					    sizeof(uint32_t);
8020					ph = mtod(oper, struct sctp_paramhdr *);
8021					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
8022					ph->param_length = htons(SCTP_BUF_LEN(oper));
8023					ippp = (uint32_t *) (ph + 1);
8024					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
8025				}
8026				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
8027				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
8028				return;
8029			} else {
8030				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8031				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
8032					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8033				}
8034				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
8035				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8036				sctp_stop_timers_for_shutdown(stcb);
8037				sctp_send_shutdown(stcb,
8038				    stcb->asoc.primary_destination);
8039				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
8040				    stcb->sctp_ep, stcb, asoc->primary_destination);
8041				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
8042				    stcb->sctp_ep, stcb, asoc->primary_destination);
8043			}
8044			return;
8045		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
8046		    (asoc->stream_queue_cnt == 0)) {
8047			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8048				goto abort_out_now;
8049			}
8050			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8051			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
8052			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8053			sctp_send_shutdown_ack(stcb,
8054			    stcb->asoc.primary_destination);
8055
8056			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
8057			    stcb->sctp_ep, stcb, asoc->primary_destination);
8058			return;
8059		}
8060	}
8061	/*
8062	 * Now here we are going to recycle net_ack for a different use...
8063	 * HEADS UP.
8064	 */
8065	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8066		net->net_ack = 0;
8067	}
8068
8069	/*
8070	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
8071	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
8072	 * automatically ensure that.
8073	 */
8074	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
8075		this_sack_lowest_newack = cum_ack;
8076	}
8077	if (num_seg > 0) {
8078		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
8079		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
8080	}
8081	/* JRS - Use the congestion control given in the CC module */
8082	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
8083
8084	/******************************************************************
8085	 *  Here we do the stuff with ECN Nonce checking.
8086	 *  We basically check to see if the nonce sum flag was incorrect
8087	 *  or if resynchronization needs to be done. Also if we catch a
8088	 *  misbehaving receiver we give him the kick.
8089	 ******************************************************************/
8090
8091	if (asoc->ecn_nonce_allowed) {
8092		if (asoc->nonce_sum_check) {
8093			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
8094				if (asoc->nonce_wait_for_ecne == 0) {
8095					struct sctp_tmit_chunk *lchk;
8096
8097					lchk = TAILQ_FIRST(&asoc->send_queue);
8098					asoc->nonce_wait_for_ecne = 1;
8099					if (lchk) {
8100						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
8101					} else {
8102						asoc->nonce_wait_tsn = asoc->sending_seq;
8103					}
8104				} else {
8105					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
8106					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
8107						/*
8108						 * Misbehaving peer. We need
8109						 * to react to this guy
8110						 */
8111						asoc->ecn_allowed = 0;
8112						asoc->ecn_nonce_allowed = 0;
8113					}
8114				}
8115			}
8116		} else {
8117			/* See if Resynchronization Possible */
8118			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
8119				asoc->nonce_sum_check = 1;
8120				/*
8121				 * now we must calculate what the base is.
8122				 * We do this based on two things, we know
8123				 * the total's for all the segments
8124				 * gap-acked in the SACK, its stored in
8125				 * ecn_seg_sums. We also know the SACK's
8126				 * nonce sum, its in nonce_sum_flag. So we
8127				 * can build a truth table to back-calculate
8128				 * the new value of
8129				 * asoc->nonce_sum_expect_base:
8130				 *
8131				 * SACK-flag-Value         Seg-Sums Base 0 0 0
8132				 * 1                    0 1 0 1 1 1 1 0
8133				 */
8134				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
8135			}
8136		}
8137	}
8138	/* Now are we exiting loss recovery ? */
8139	if (will_exit_fast_recovery) {
8140		/* Ok, we must exit fast recovery */
8141		asoc->fast_retran_loss_recovery = 0;
8142	}
8143	if ((asoc->sat_t3_loss_recovery) &&
8144	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
8145	    MAX_TSN) ||
8146	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
8147		/* end satellite t3 loss recovery */
8148		asoc->sat_t3_loss_recovery = 0;
8149	}
8150	/*
8151	 * CMT Fast recovery
8152	 */
8153	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8154		if (net->will_exit_fast_recovery) {
8155			/* Ok, we must exit fast recovery */
8156			net->fast_retran_loss_recovery = 0;
8157		}
8158	}
8159
8160	/* Adjust and set the new rwnd value */
8161	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8162		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8163		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
8164	}
8165	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
8166	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
8167	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8168		/* SWS sender side engages */
8169		asoc->peers_rwnd = 0;
8170	}
8171	if (asoc->peers_rwnd > old_rwnd) {
8172		win_probe_recovery = 1;
8173	}
8174	/*
8175	 * Now we must set up a retransmission timer for every
8176	 * destination that still has data outstanding.
8177	 */
8178	done_once = 0;
8179again:
8180	j = 0;
8181	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8182		if (win_probe_recovery && (net->window_probe)) {
8183			win_probe_recovered = 1;
8184			/*-
8185			 * Find the first chunk that was used for a
8186			 * window probe and clear the event. Put it
8187			 * back into the send queue as if it had
8188			 * not been sent.
8189			 */
8190			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8191				if (tp1->window_probe) {
8192					sctp_window_probe_recovery(stcb, asoc, net, tp1);
8193					break;
8194				}
8195			}
8196		}
8197		if (net->flight_size) {
8198			j++;
8199			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8200			    stcb->sctp_ep, stcb, net);
8201			if (net->window_probe) {
8202				net->window_probe = 0;
8203			}
8204		} else {
8205			if (net->window_probe) {
8206				net->window_probe = 0;
8207				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8208				    stcb->sctp_ep, stcb, net);
8209			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8210				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8211				    stcb, net,
8212				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8213			}
8214			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8215				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8216					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8217					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8218					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
8219				}
8220			}
8221		}
8222	}
8223	if ((j == 0) &&
8224	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8225	    (asoc->sent_queue_retran_cnt == 0) &&
8226	    (win_probe_recovered == 0) &&
8227	    (done_once == 0)) {
8228		/*
8229		 * This should not happen unless all outstanding packets
8230		 * are PR-SCTP and have been marked to be skipped.
8231		 */
8232		if (sctp_fs_audit(asoc)) {
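			/*
			 * The audit reports the flight-size accounting is
			 * off; rebuild it from scratch off the sent queue.
			 */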
8233			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8234				net->flight_size = 0;
8235			}
8236			asoc->total_flight = 0;
8237			asoc->total_flight_count = 0;
8238			asoc->sent_queue_retran_cnt = 0;
8239			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8240				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8241					sctp_flight_size_increase(tp1);
8242					sctp_total_flight_increase(stcb, tp1);
8243				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8244					asoc->sent_queue_retran_cnt++;
8245				}
8246			}
8247		}
8248		done_once = 1;
8249		goto again;
8250	}
8251	/*********************************************/
8252	/* Here we perform PR-SCTP procedures        */
8253	/* (section 4.2)                             */
8254	/*********************************************/
8255	/* C1. update advancedPeerAckPoint */
8256	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8257		asoc->advanced_peer_ack_point = cum_ack;
8258	}
8259	/* C2. try to further move advancedPeerAckPoint ahead */
8260	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8261		struct sctp_tmit_chunk *lchk;
8262		uint32_t old_adv_peer_ack_point;
8263
8264		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8265		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
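		/*
		 * lchk, when non-NULL, appears to be the chunk at which
		 * the advance stopped; it is used below both for the
		 * lost-FWD-TSN heuristic and to keep a T3 timer running
		 * toward its destination.
		 */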
8266		/* C3. See if we need to send a Fwd-TSN */
8267		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8268		    MAX_TSN)) {
8269			/*
8270			 * ISSUE with ECN: see the FWD-TSN processing for
8271			 * notes on the problems that will occur when ECN
8272			 * nonce cross-checking is added to SCTP.
8273			 */
8274			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8275			    MAX_TSN)) {
8276				send_forward_tsn(stcb, asoc);
8277				/*
8278				 * ECN Nonce: disable the nonce sum check when
8279				 * a FWD-TSN is sent, and store the resync TSN.
8280				 */
8281				asoc->nonce_sum_check = 0;
8282				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8283			} else if (lchk) {
8284				/* try to fast-retransmit FWD-TSNs that get lost too */
8285				lchk->rec.data.fwd_tsn_cnt++;
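				/*
				 * Heuristic: if more than three SACKs
				 * arrive without the advanced ack point
				 * moving, assume the FWD-TSN was lost
				 * and send a fresh one.
				 */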
8286				if (lchk->rec.data.fwd_tsn_cnt > 3) {
8287					send_forward_tsn(stcb, asoc);
8288					lchk->rec.data.fwd_tsn_cnt = 0;
8289				}
8290			}
8291		}
8292		if (lchk) {
8293			/* Ensure a timer is up */
8294			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8295			    stcb->sctp_ep, stcb, lchk->whoTo);
8296		}
8297	}
8298	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8299		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8300		    a_rwnd,
8301		    stcb->asoc.peers_rwnd,
8302		    stcb->asoc.total_flight,
8303		    stcb->asoc.total_output_queue_size);
8304	}
8305}
8306