sctp_indata.c revision 190689
1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 190689 2009-04-04 11:43:32Z rrs $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_indata.h>
45#include <netinet/sctp_uio.h>
46#include <netinet/sctp_timer.h>
47
48
49/*
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
53 *
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
56 * the list.
57 */
58
59void
60sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61{
62	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63}
64
65/* Calculate what the rwnd would be */
66uint32_t
67sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68{
69	uint32_t calc = 0;
70
71	/*
72	 * This is really set wrong with respect to a 1-2-m socket. Since
73	 * the sb_cc is the count that everyone as put up. When we re-write
74	 * sctp_soreceive then we will fix this so that ONLY this
75	 * associations data is taken into account.
76	 */
77	if (stcb->sctp_socket == NULL)
78		return (calc);
79
80	if (stcb->asoc.sb_cc == 0 &&
81	    asoc->size_on_reasm_queue == 0 &&
82	    asoc->size_on_all_streams == 0) {
83		/* Full rwnd granted */
84		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85		return (calc);
86	}
87	/* get actual space */
88	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89
90	/*
91	 * take out what has NOT been put on socket queue and we yet hold
92	 * for putting up.
93	 */
94	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
95	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
96
97	if (calc == 0) {
98		/* out of space */
99		return (calc);
100	}
101	/* what is the overhead of all these rwnd's */
102	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
103	/*
104	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
105	 * even it is 0. SWS engaged
106	 */
107	if (calc < stcb->asoc.my_rwnd_control_len) {
108		calc = 1;
109	}
110	return (calc);
111}
112
113
114
115/*
116 * Build out our readq entry based on the incoming packet.
117 */
118struct sctp_queued_to_read *
119sctp_build_readq_entry(struct sctp_tcb *stcb,
120    struct sctp_nets *net,
121    uint32_t tsn, uint32_t ppid,
122    uint32_t context, uint16_t stream_no,
123    uint16_t stream_seq, uint8_t flags,
124    struct mbuf *dm)
125{
126	struct sctp_queued_to_read *read_queue_e = NULL;
127
128	sctp_alloc_a_readq(stcb, read_queue_e);
129	if (read_queue_e == NULL) {
130		goto failed_build;
131	}
132	read_queue_e->sinfo_stream = stream_no;
133	read_queue_e->sinfo_ssn = stream_seq;
134	read_queue_e->sinfo_flags = (flags << 8);
135	read_queue_e->sinfo_ppid = ppid;
136	read_queue_e->sinfo_context = stcb->asoc.context;
137	read_queue_e->sinfo_timetolive = 0;
138	read_queue_e->sinfo_tsn = tsn;
139	read_queue_e->sinfo_cumtsn = tsn;
140	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
141	read_queue_e->whoFrom = net;
142	read_queue_e->length = 0;
143	atomic_add_int(&net->ref_count, 1);
144	read_queue_e->data = dm;
145	read_queue_e->spec_flags = 0;
146	read_queue_e->tail_mbuf = NULL;
147	read_queue_e->aux_data = NULL;
148	read_queue_e->stcb = stcb;
149	read_queue_e->port_from = stcb->rport;
150	read_queue_e->do_not_ref_stcb = 0;
151	read_queue_e->end_added = 0;
152	read_queue_e->some_taken = 0;
153	read_queue_e->pdapi_aborted = 0;
154failed_build:
155	return (read_queue_e);
156}
157
158
159/*
160 * Build out our readq entry based on the incoming packet.
161 */
162static struct sctp_queued_to_read *
163sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
164    struct sctp_tmit_chunk *chk)
165{
166	struct sctp_queued_to_read *read_queue_e = NULL;
167
168	sctp_alloc_a_readq(stcb, read_queue_e);
169	if (read_queue_e == NULL) {
170		goto failed_build;
171	}
172	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
173	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
174	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
175	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
176	read_queue_e->sinfo_context = stcb->asoc.context;
177	read_queue_e->sinfo_timetolive = 0;
178	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
179	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
180	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
181	read_queue_e->whoFrom = chk->whoTo;
182	read_queue_e->aux_data = NULL;
183	read_queue_e->length = 0;
184	atomic_add_int(&chk->whoTo->ref_count, 1);
185	read_queue_e->data = chk->data;
186	read_queue_e->tail_mbuf = NULL;
187	read_queue_e->stcb = stcb;
188	read_queue_e->port_from = stcb->rport;
189	read_queue_e->spec_flags = 0;
190	read_queue_e->do_not_ref_stcb = 0;
191	read_queue_e->end_added = 0;
192	read_queue_e->some_taken = 0;
193	read_queue_e->pdapi_aborted = 0;
194failed_build:
195	return (read_queue_e);
196}
197
198
199struct mbuf *
200sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
201    struct sctp_sndrcvinfo *sinfo)
202{
203	struct sctp_sndrcvinfo *outinfo;
204	struct cmsghdr *cmh;
205	struct mbuf *ret;
206	int len;
207	int use_extended = 0;
208
209	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
210		/* user does not want the sndrcv ctl */
211		return (NULL);
212	}
213	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
214		use_extended = 1;
215		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
216	} else {
217		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
218	}
219
220
221	ret = sctp_get_mbuf_for_msg(len,
222	    0, M_DONTWAIT, 1, MT_DATA);
223
224	if (ret == NULL) {
225		/* No space */
226		return (ret);
227	}
228	/* We need a CMSG header followed by the struct  */
229	cmh = mtod(ret, struct cmsghdr *);
230	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
231	cmh->cmsg_level = IPPROTO_SCTP;
232	if (use_extended) {
233		cmh->cmsg_type = SCTP_EXTRCV;
234		cmh->cmsg_len = len;
235		memcpy(outinfo, sinfo, len);
236	} else {
237		cmh->cmsg_type = SCTP_SNDRCV;
238		cmh->cmsg_len = len;
239		*outinfo = *sinfo;
240	}
241	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
242	return (ret);
243}
244
245
246char *
247sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
248    int *control_len,
249    struct sctp_sndrcvinfo *sinfo)
250{
251	struct sctp_sndrcvinfo *outinfo;
252	struct cmsghdr *cmh;
253	char *buf;
254	int len;
255	int use_extended = 0;
256
257	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
258		/* user does not want the sndrcv ctl */
259		return (NULL);
260	}
261	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
262		use_extended = 1;
263		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
264	} else {
265		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
266	}
267	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
268	if (buf == NULL) {
269		/* No space */
270		return (buf);
271	}
272	/* We need a CMSG header followed by the struct  */
273	cmh = (struct cmsghdr *)buf;
274	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
275	cmh->cmsg_level = IPPROTO_SCTP;
276	if (use_extended) {
277		cmh->cmsg_type = SCTP_EXTRCV;
278		cmh->cmsg_len = len;
279		memcpy(outinfo, sinfo, len);
280	} else {
281		cmh->cmsg_type = SCTP_SNDRCV;
282		cmh->cmsg_len = len;
283		*outinfo = *sinfo;
284	}
285	*control_len = len;
286	return (buf);
287}
288
289
290/*
291 * We are delivering currently from the reassembly queue. We must continue to
292 * deliver until we either: 1) run out of space. 2) run out of sequential
293 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
294 */
295static void
296sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
297{
298	struct sctp_tmit_chunk *chk;
299	uint16_t nxt_todel;
300	uint16_t stream_no;
301	int end = 0;
302	int cntDel;
303
304	/* EY if any out-of-order delivered, then tag it nr on nr_map */
305	uint32_t nr_tsn, nr_gap;
306
307	struct sctp_queued_to_read *control, *ctl, *ctlat;
308
309	if (stcb == NULL)
310		return;
311
312	cntDel = stream_no = 0;
313	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
314	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
315	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
316		/* socket above is long gone or going.. */
317abandon:
318		asoc->fragmented_delivery_inprogress = 0;
319		chk = TAILQ_FIRST(&asoc->reasmqueue);
320		while (chk) {
321			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
322			asoc->size_on_reasm_queue -= chk->send_size;
323			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
324			/*
325			 * Lose the data pointer, since its in the socket
326			 * buffer
327			 */
328			if (chk->data) {
329				sctp_m_freem(chk->data);
330				chk->data = NULL;
331			}
332			/* Now free the address and data */
333			sctp_free_a_chunk(stcb, chk);
334			/* sa_ignore FREED_MEMORY */
335			chk = TAILQ_FIRST(&asoc->reasmqueue);
336		}
337		return;
338	}
339	SCTP_TCB_LOCK_ASSERT(stcb);
340	do {
341		chk = TAILQ_FIRST(&asoc->reasmqueue);
342		if (chk == NULL) {
343			return;
344		}
345		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
346			/* Can't deliver more :< */
347			return;
348		}
349		stream_no = chk->rec.data.stream_number;
350		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
351		if (nxt_todel != chk->rec.data.stream_seq &&
352		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
353			/*
354			 * Not the next sequence to deliver in its stream OR
355			 * unordered
356			 */
357			return;
358		}
359		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
360
361			control = sctp_build_readq_entry_chk(stcb, chk);
362			if (control == NULL) {
363				/* out of memory? */
364				return;
365			}
366			/* save it off for our future deliveries */
367			stcb->asoc.control_pdapi = control;
368			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
369				end = 1;
370			else
371				end = 0;
372			sctp_add_to_readq(stcb->sctp_ep,
373			    stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
374			cntDel++;
375		} else {
376			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
377				end = 1;
378			else
379				end = 0;
380			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
381			    stcb->asoc.control_pdapi,
382			    chk->data, end, chk->rec.data.TSN_seq,
383			    &stcb->sctp_socket->so_rcv)) {
384				/*
385				 * something is very wrong, either
386				 * control_pdapi is NULL, or the tail_mbuf
387				 * is corrupt, or there is a EOM already on
388				 * the mbuf chain.
389				 */
390				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
391					goto abandon;
392				} else {
393#ifdef INVARIANTS
394					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
395						panic("This should not happen control_pdapi NULL?");
396					}
397					/* if we did not panic, it was a EOM */
398					panic("Bad chunking ??");
399#else
400					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
401						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
402					}
403					SCTP_PRINTF("Bad chunking ??\n");
404					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
405
406#endif
407					goto abandon;
408				}
409			}
410			cntDel++;
411		}
412		/* pull it we did it */
413		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
414		/*
415		 * EY this is the chunk that should be tagged nr gapped
416		 * calculate the gap and such then tag this TSN nr
417		 * chk->rec.data.TSN_seq
418		 */
419		/*
420		 * EY!-TODO- this tsn should be tagged nr only if it is
421		 * out-of-order, the if statement should be modified
422		 */
423		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
424
425			nr_tsn = chk->rec.data.TSN_seq;
426			if ((compare_with_wrap(nr_tsn, asoc->nr_mapping_array_base_tsn, MAX_TSN)) ||
427			    (nr_tsn == asoc->nr_mapping_array_base_tsn)) {
428				nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
429			} else {
430				nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
431			}
432			if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3)) ||
433			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
434				/*
435				 * EY The 1st should never happen, as in
436				 * process_a_data_chunk method this check
437				 * should be done
438				 */
439				/*
440				 * EY The 2nd should never happen, because
441				 * nr_mapping_array is always expanded when
442				 * mapping_array is expanded
443				 */
444				printf("Impossible nr_gap ack range failed\n");
445			} else {
446				SCTP_TCB_LOCK_ASSERT(stcb);
447				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
448				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
449					asoc->highest_tsn_inside_nr_map = nr_tsn;
450			}
451		}
452		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
453			asoc->fragmented_delivery_inprogress = 0;
454			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
455				asoc->strmin[stream_no].last_sequence_delivered++;
456			}
457			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
458				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
459			}
460		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
461			/*
462			 * turn the flag back on since we just  delivered
463			 * yet another one.
464			 */
465			asoc->fragmented_delivery_inprogress = 1;
466		}
467		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
468		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
469		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
470		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
471
472		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
473		asoc->size_on_reasm_queue -= chk->send_size;
474		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
475		/* free up the chk */
476		chk->data = NULL;
477		sctp_free_a_chunk(stcb, chk);
478
479		if (asoc->fragmented_delivery_inprogress == 0) {
480			/*
481			 * Now lets see if we can deliver the next one on
482			 * the stream
483			 */
484			struct sctp_stream_in *strm;
485
486			strm = &asoc->strmin[stream_no];
487			nxt_todel = strm->last_sequence_delivered + 1;
488			ctl = TAILQ_FIRST(&strm->inqueue);
489			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
490				while (ctl != NULL) {
491					/* Deliver more if we can. */
492					if (nxt_todel == ctl->sinfo_ssn) {
493						ctlat = TAILQ_NEXT(ctl, next);
494						TAILQ_REMOVE(&strm->inqueue, ctl, next);
495						asoc->size_on_all_streams -= ctl->length;
496						sctp_ucount_decr(asoc->cnt_on_all_streams);
497						strm->last_sequence_delivered++;
498						/*
499						 * EY will be used to
500						 * calculate nr-gap
501						 */
502						nr_tsn = ctl->sinfo_tsn;
503						sctp_add_to_readq(stcb->sctp_ep, stcb,
504						    ctl,
505						    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
506						/*
507						 * EY -now something is
508						 * delivered, calculate
509						 * nr_gap and tag this tsn
510						 * NR
511						 */
512						if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
513
514							if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
515								nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
516							} else {
517								nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
518							}
519							if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
520							    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
521								/*
522								 * EY The
523								 * 1st
524								 * should
525								 * never
526								 * happen,
527								 * as in
528								 * process_a_
529								 * data_chunk
530								 *  method
531								 * this
532								 * check
533								 * should be
534								 * done
535								 */
536								/*
537								 * EY The
538								 * 2nd
539								 * should
540								 * never
541								 * happen,
542								 * because
543								 * nr_mapping
544								 * _array is
545								 * always
546								 * expanded
547								 * when
548								 * mapping_ar
549								 * ray is
550								 * expanded
551								 */
552							} else {
553								SCTP_TCB_LOCK_ASSERT(stcb);
554								SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
555								if (compare_with_wrap(nr_tsn,
556								    asoc->highest_tsn_inside_nr_map,
557								    MAX_TSN))
558									asoc->highest_tsn_inside_nr_map = nr_tsn;
559							}
560						}
561						ctl = ctlat;
562					} else {
563						break;
564					}
565					nxt_todel = strm->last_sequence_delivered + 1;
566				}
567			}
568			break;
569		}
570		/* sa_ignore FREED_MEMORY */
571		chk = TAILQ_FIRST(&asoc->reasmqueue);
572	} while (chk);
573}
574
575/*
576 * Queue the chunk either right into the socket buffer if it is the next one
577 * to go OR put it in the correct place in the delivery queue.  If we do
578 * append to the so_buf, keep doing so until we are out of order. One big
579 * question still remains, what to do when the socket buffer is FULL??
580 */
581static void
582sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
583    struct sctp_queued_to_read *control, int *abort_flag)
584{
585	/*
586	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
587	 * all the data in one stream this could happen quite rapidly. One
588	 * could use the TSN to keep track of things, but this scheme breaks
589	 * down in the other type of stream useage that could occur. Send a
590	 * single msg to stream 0, send 4Billion messages to stream 1, now
591	 * send a message to stream 0. You have a situation where the TSN
592	 * has wrapped but not in the stream. Is this worth worrying about
593	 * or should we just change our queue sort at the bottom to be by
594	 * TSN.
595	 *
596	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
597	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
598	 * assignment this could happen... and I don't see how this would be
599	 * a violation. So for now I am undecided an will leave the sort by
600	 * SSN alone. Maybe a hybred approach is the answer
601	 *
602	 */
603	struct sctp_stream_in *strm;
604	struct sctp_queued_to_read *at;
605	int queue_needed;
606	uint16_t nxt_todel;
607	struct mbuf *oper;
608
609	/* EY- will be used to calculate nr-gap for a tsn */
610	uint32_t nr_tsn, nr_gap;
611
612	queue_needed = 1;
613	asoc->size_on_all_streams += control->length;
614	sctp_ucount_incr(asoc->cnt_on_all_streams);
615	strm = &asoc->strmin[control->sinfo_stream];
616	nxt_todel = strm->last_sequence_delivered + 1;
617	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
618		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
619	}
620	SCTPDBG(SCTP_DEBUG_INDATA1,
621	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
622	    (uint32_t) control->sinfo_stream,
623	    (uint32_t) strm->last_sequence_delivered,
624	    (uint32_t) nxt_todel);
625	if (compare_with_wrap(strm->last_sequence_delivered,
626	    control->sinfo_ssn, MAX_SEQ) ||
627	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
628		/* The incoming sseq is behind where we last delivered? */
629		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort  association\n",
630		    control->sinfo_ssn, strm->last_sequence_delivered);
631protocol_error:
632		/*
633		 * throw it in the stream so it gets cleaned up in
634		 * association destruction
635		 */
636		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
637		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
638		    0, M_DONTWAIT, 1, MT_DATA);
639		if (oper) {
640			struct sctp_paramhdr *ph;
641			uint32_t *ippp;
642
643			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
644			    (sizeof(uint32_t) * 3);
645			ph = mtod(oper, struct sctp_paramhdr *);
646			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
647			ph->param_length = htons(SCTP_BUF_LEN(oper));
648			ippp = (uint32_t *) (ph + 1);
649			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
650			ippp++;
651			*ippp = control->sinfo_tsn;
652			ippp++;
653			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
654		}
655		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
656		sctp_abort_an_association(stcb->sctp_ep, stcb,
657		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
658
659		*abort_flag = 1;
660		return;
661
662	}
663	if (nxt_todel == control->sinfo_ssn) {
664		/* can be delivered right away? */
665		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
666			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
667		}
668		/* EY it wont be queued if it could be delivered directly */
669		queue_needed = 0;
670		asoc->size_on_all_streams -= control->length;
671		sctp_ucount_decr(asoc->cnt_on_all_streams);
672		strm->last_sequence_delivered++;
673		/* EY will be used to calculate nr-gap */
674		nr_tsn = control->sinfo_tsn;
675		sctp_add_to_readq(stcb->sctp_ep, stcb,
676		    control,
677		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
678
679		/*
680		 * EY this is the chunk that should be tagged nr gapped
681		 * calculate the gap and such then tag this TSN nr
682		 * chk->rec.data.TSN_seq
683		 */
684		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
685
686			if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
687				nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
688			} else {
689				nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
690			}
691			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
692			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
693				/*
694				 * EY The 1st should never happen, as in
695				 * process_a_data_chunk method this check
696				 * should be done
697				 */
698				/*
699				 * EY The 2nd should never happen, because
700				 * nr_mapping_array is always expanded when
701				 * mapping_array is expanded
702				 */
703			} else {
704				SCTP_TCB_LOCK_ASSERT(stcb);
705				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
706				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
707					asoc->highest_tsn_inside_nr_map = nr_tsn;
708			}
709		}
710		control = TAILQ_FIRST(&strm->inqueue);
711		while (control != NULL) {
712			/* all delivered */
713			nxt_todel = strm->last_sequence_delivered + 1;
714			if (nxt_todel == control->sinfo_ssn) {
715				at = TAILQ_NEXT(control, next);
716				TAILQ_REMOVE(&strm->inqueue, control, next);
717				asoc->size_on_all_streams -= control->length;
718				sctp_ucount_decr(asoc->cnt_on_all_streams);
719				strm->last_sequence_delivered++;
720				/*
721				 * We ignore the return of deliver_data here
722				 * since we always can hold the chunk on the
723				 * d-queue. And we have a finite number that
724				 * can be delivered from the strq.
725				 */
726				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
727					sctp_log_strm_del(control, NULL,
728					    SCTP_STR_LOG_FROM_IMMED_DEL);
729				}
730				/* EY will be used to calculate nr-gap */
731				nr_tsn = control->sinfo_tsn;
732				sctp_add_to_readq(stcb->sctp_ep, stcb,
733				    control,
734				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
735				/*
736				 * EY this is the chunk that should be
737				 * tagged nr gapped calculate the gap and
738				 * such then tag this TSN nr
739				 * chk->rec.data.TSN_seq
740				 */
741				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
742
743					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
744						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
745					} else {
746						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
747					}
748					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
749					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
750						/*
751						 * EY The 1st should never
752						 * happen, as in
753						 * process_a_data_chunk
754						 * method this check should
755						 * be done
756						 */
757						/*
758						 * EY The 2nd should never
759						 * happen, because
760						 * nr_mapping_array is
761						 * always expanded when
762						 * mapping_array is expanded
763						 */
764					} else {
765						SCTP_TCB_LOCK_ASSERT(stcb);
766						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
767						if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
768						    MAX_TSN))
769							asoc->highest_tsn_inside_nr_map = nr_tsn;
770					}
771				}
772				control = at;
773				continue;
774			}
775			break;
776		}
777	}
778	if (queue_needed) {
779		/*
780		 * Ok, we did not deliver this guy, find the correct place
781		 * to put it on the queue.
782		 */
783		if ((compare_with_wrap(asoc->cumulative_tsn,
784		    control->sinfo_tsn, MAX_TSN)) ||
785		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
786			goto protocol_error;
787		}
788		if (TAILQ_EMPTY(&strm->inqueue)) {
789			/* Empty queue */
790			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
791				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
792			}
793			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
794		} else {
795			TAILQ_FOREACH(at, &strm->inqueue, next) {
796				if (compare_with_wrap(at->sinfo_ssn,
797				    control->sinfo_ssn, MAX_SEQ)) {
798					/*
799					 * one in queue is bigger than the
800					 * new one, insert before this one
801					 */
802					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
803						sctp_log_strm_del(control, at,
804						    SCTP_STR_LOG_FROM_INSERT_MD);
805					}
806					TAILQ_INSERT_BEFORE(at, control, next);
807					break;
808				} else if (at->sinfo_ssn == control->sinfo_ssn) {
809					/*
810					 * Gak, He sent me a duplicate str
811					 * seq number
812					 */
813					/*
814					 * foo bar, I guess I will just free
815					 * this new guy, should we abort
816					 * too? FIX ME MAYBE? Or it COULD be
817					 * that the SSN's have wrapped.
818					 * Maybe I should compare to TSN
819					 * somehow... sigh for now just blow
820					 * away the chunk!
821					 */
822
823					if (control->data)
824						sctp_m_freem(control->data);
825					control->data = NULL;
826					asoc->size_on_all_streams -= control->length;
827					sctp_ucount_decr(asoc->cnt_on_all_streams);
828					if (control->whoFrom)
829						sctp_free_remote_addr(control->whoFrom);
830					control->whoFrom = NULL;
831					sctp_free_a_readq(stcb, control);
832					return;
833				} else {
834					if (TAILQ_NEXT(at, next) == NULL) {
835						/*
836						 * We are at the end, insert
837						 * it after this one
838						 */
839						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
840							sctp_log_strm_del(control, at,
841							    SCTP_STR_LOG_FROM_INSERT_TL);
842						}
843						TAILQ_INSERT_AFTER(&strm->inqueue,
844						    at, control, next);
845						break;
846					}
847				}
848			}
849		}
850	}
851}
852
853/*
854 * Returns two things: You get the total size of the deliverable parts of the
855 * first fragmented message on the reassembly queue. And you get a 1 back if
856 * all of the message is ready or a 0 back if the message is still incomplete
857 */
858static int
859sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
860{
861	struct sctp_tmit_chunk *chk;
862	uint32_t tsn;
863
864	*t_size = 0;
865	chk = TAILQ_FIRST(&asoc->reasmqueue);
866	if (chk == NULL) {
867		/* nothing on the queue */
868		return (0);
869	}
870	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
871		/* Not a first on the queue */
872		return (0);
873	}
874	tsn = chk->rec.data.TSN_seq;
875	while (chk) {
876		if (tsn != chk->rec.data.TSN_seq) {
877			return (0);
878		}
879		*t_size += chk->send_size;
880		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
881			return (1);
882		}
883		tsn++;
884		chk = TAILQ_NEXT(chk, sctp_next);
885	}
886	return (0);
887}
888
889static void
890sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
891{
892	struct sctp_tmit_chunk *chk;
893	uint16_t nxt_todel;
894	uint32_t tsize;
895
896doit_again:
897	chk = TAILQ_FIRST(&asoc->reasmqueue);
898	if (chk == NULL) {
899		/* Huh? */
900		asoc->size_on_reasm_queue = 0;
901		asoc->cnt_on_reasm_queue = 0;
902		return;
903	}
904	if (asoc->fragmented_delivery_inprogress == 0) {
905		nxt_todel =
906		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
907		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
908		    (nxt_todel == chk->rec.data.stream_seq ||
909		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
910			/*
911			 * Yep the first one is here and its ok to deliver
912			 * but should we?
913			 */
914			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
915			    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
916
917				/*
918				 * Yes, we setup to start reception, by
919				 * backing down the TSN just in case we
920				 * can't deliver. If we
921				 */
922				asoc->fragmented_delivery_inprogress = 1;
923				asoc->tsn_last_delivered =
924				    chk->rec.data.TSN_seq - 1;
925				asoc->str_of_pdapi =
926				    chk->rec.data.stream_number;
927				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
928				asoc->pdapi_ppid = chk->rec.data.payloadtype;
929				asoc->fragment_flags = chk->rec.data.rcv_flags;
930				sctp_service_reassembly(stcb, asoc);
931			}
932		}
933	} else {
934		/*
935		 * Service re-assembly will deliver stream data queued at
936		 * the end of fragmented delivery.. but it wont know to go
937		 * back and call itself again... we do that here with the
938		 * got doit_again
939		 */
940		sctp_service_reassembly(stcb, asoc);
941		if (asoc->fragmented_delivery_inprogress == 0) {
942			/*
943			 * finished our Fragmented delivery, could be more
944			 * waiting?
945			 */
946			goto doit_again;
947		}
948	}
949}
950
951/*
952 * Dump onto the re-assembly queue, in its proper place. After dumping on the
953 * queue, see if anthing can be delivered. If so pull it off (or as much as
954 * we can. If we run out of space then we must dump what we can and set the
955 * appropriate flag to say we queued what we could.
956 */
957static void
958sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
959    struct sctp_tmit_chunk *chk, int *abort_flag)
960{
961	struct mbuf *oper;
962	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
963	u_char last_flags;
964	struct sctp_tmit_chunk *at, *prev, *next;
965
966	prev = next = NULL;
967	cum_ackp1 = asoc->tsn_last_delivered + 1;
968	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
969		/* This is the first one on the queue */
970		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
971		/*
972		 * we do not check for delivery of anything when only one
973		 * fragment is here
974		 */
975		asoc->size_on_reasm_queue = chk->send_size;
976		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
977		if (chk->rec.data.TSN_seq == cum_ackp1) {
978			if (asoc->fragmented_delivery_inprogress == 0 &&
979			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
980			    SCTP_DATA_FIRST_FRAG) {
981				/*
982				 * An empty queue, no delivery inprogress,
983				 * we hit the next one and it does NOT have
984				 * a FIRST fragment mark.
985				 */
986				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
987				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
988				    0, M_DONTWAIT, 1, MT_DATA);
989
990				if (oper) {
991					struct sctp_paramhdr *ph;
992					uint32_t *ippp;
993
994					SCTP_BUF_LEN(oper) =
995					    sizeof(struct sctp_paramhdr) +
996					    (sizeof(uint32_t) * 3);
997					ph = mtod(oper, struct sctp_paramhdr *);
998					ph->param_type =
999					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1000					ph->param_length = htons(SCTP_BUF_LEN(oper));
1001					ippp = (uint32_t *) (ph + 1);
1002					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
1003					ippp++;
1004					*ippp = chk->rec.data.TSN_seq;
1005					ippp++;
1006					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1007
1008				}
1009				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
1010				sctp_abort_an_association(stcb->sctp_ep, stcb,
1011				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1012				*abort_flag = 1;
1013			} else if (asoc->fragmented_delivery_inprogress &&
1014			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1015				/*
1016				 * We are doing a partial delivery and the
1017				 * NEXT chunk MUST be either the LAST or
1018				 * MIDDLE fragment NOT a FIRST
1019				 */
1020				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1021				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1022				    0, M_DONTWAIT, 1, MT_DATA);
1023				if (oper) {
1024					struct sctp_paramhdr *ph;
1025					uint32_t *ippp;
1026
1027					SCTP_BUF_LEN(oper) =
1028					    sizeof(struct sctp_paramhdr) +
1029					    (3 * sizeof(uint32_t));
1030					ph = mtod(oper, struct sctp_paramhdr *);
1031					ph->param_type =
1032					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1033					ph->param_length = htons(SCTP_BUF_LEN(oper));
1034					ippp = (uint32_t *) (ph + 1);
1035					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
1036					ippp++;
1037					*ippp = chk->rec.data.TSN_seq;
1038					ippp++;
1039					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1040				}
1041				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
1042				sctp_abort_an_association(stcb->sctp_ep, stcb,
1043				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1044				*abort_flag = 1;
1045			} else if (asoc->fragmented_delivery_inprogress) {
1046				/*
1047				 * Here we are ok with a MIDDLE or LAST
1048				 * piece
1049				 */
1050				if (chk->rec.data.stream_number !=
1051				    asoc->str_of_pdapi) {
1052					/* Got to be the right STR No */
1053					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
1054					    chk->rec.data.stream_number,
1055					    asoc->str_of_pdapi);
1056					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1057					    0, M_DONTWAIT, 1, MT_DATA);
1058					if (oper) {
1059						struct sctp_paramhdr *ph;
1060						uint32_t *ippp;
1061
1062						SCTP_BUF_LEN(oper) =
1063						    sizeof(struct sctp_paramhdr) +
1064						    (sizeof(uint32_t) * 3);
1065						ph = mtod(oper,
1066						    struct sctp_paramhdr *);
1067						ph->param_type =
1068						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1069						ph->param_length =
1070						    htons(SCTP_BUF_LEN(oper));
1071						ippp = (uint32_t *) (ph + 1);
1072						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1073						ippp++;
1074						*ippp = chk->rec.data.TSN_seq;
1075						ippp++;
1076						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1077					}
1078					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
1079					sctp_abort_an_association(stcb->sctp_ep,
1080					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1081					*abort_flag = 1;
1082				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1083					    SCTP_DATA_UNORDERED &&
1084					    chk->rec.data.stream_seq !=
1085				    asoc->ssn_of_pdapi) {
1086					/* Got to be the right STR Seq */
1087					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1088					    chk->rec.data.stream_seq,
1089					    asoc->ssn_of_pdapi);
1090					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1091					    0, M_DONTWAIT, 1, MT_DATA);
1092					if (oper) {
1093						struct sctp_paramhdr *ph;
1094						uint32_t *ippp;
1095
1096						SCTP_BUF_LEN(oper) =
1097						    sizeof(struct sctp_paramhdr) +
1098						    (3 * sizeof(uint32_t));
1099						ph = mtod(oper,
1100						    struct sctp_paramhdr *);
1101						ph->param_type =
1102						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1103						ph->param_length =
1104						    htons(SCTP_BUF_LEN(oper));
1105						ippp = (uint32_t *) (ph + 1);
1106						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1107						ippp++;
1108						*ippp = chk->rec.data.TSN_seq;
1109						ippp++;
1110						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1111
1112					}
1113					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1114					sctp_abort_an_association(stcb->sctp_ep,
1115					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1116					*abort_flag = 1;
1117				}
1118			}
1119		}
1120		return;
1121	}
1122	/* Find its place */
1123	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1124		if (compare_with_wrap(at->rec.data.TSN_seq,
1125		    chk->rec.data.TSN_seq, MAX_TSN)) {
1126			/*
1127			 * one in queue is bigger than the new one, insert
1128			 * before this one
1129			 */
1130			/* A check */
1131			asoc->size_on_reasm_queue += chk->send_size;
1132			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1133			next = at;
1134			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1135			break;
1136		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1137			/* Gak, He sent me a duplicate str seq number */
1138			/*
1139			 * foo bar, I guess I will just free this new guy,
1140			 * should we abort too? FIX ME MAYBE? Or it COULD be
1141			 * that the SSN's have wrapped. Maybe I should
1142			 * compare to TSN somehow... sigh for now just blow
1143			 * away the chunk!
1144			 */
1145			if (chk->data) {
1146				sctp_m_freem(chk->data);
1147				chk->data = NULL;
1148			}
1149			sctp_free_a_chunk(stcb, chk);
1150			return;
1151		} else {
1152			last_flags = at->rec.data.rcv_flags;
1153			last_tsn = at->rec.data.TSN_seq;
1154			prev = at;
1155			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1156				/*
1157				 * We are at the end, insert it after this
1158				 * one
1159				 */
1160				/* check it first */
1161				asoc->size_on_reasm_queue += chk->send_size;
1162				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1163				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1164				break;
1165			}
1166		}
1167	}
1168	/* Now the audits */
1169	if (prev) {
1170		prev_tsn = chk->rec.data.TSN_seq - 1;
1171		if (prev_tsn == prev->rec.data.TSN_seq) {
1172			/*
1173			 * Ok the one I am dropping onto the end is the
1174			 * NEXT. A bit of valdiation here.
1175			 */
1176			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1177			    SCTP_DATA_FIRST_FRAG ||
1178			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1179			    SCTP_DATA_MIDDLE_FRAG) {
1180				/*
1181				 * Insert chk MUST be a MIDDLE or LAST
1182				 * fragment
1183				 */
1184				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1185				    SCTP_DATA_FIRST_FRAG) {
1186					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1187					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1188					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1189					    0, M_DONTWAIT, 1, MT_DATA);
1190					if (oper) {
1191						struct sctp_paramhdr *ph;
1192						uint32_t *ippp;
1193
1194						SCTP_BUF_LEN(oper) =
1195						    sizeof(struct sctp_paramhdr) +
1196						    (3 * sizeof(uint32_t));
1197						ph = mtod(oper,
1198						    struct sctp_paramhdr *);
1199						ph->param_type =
1200						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1201						ph->param_length =
1202						    htons(SCTP_BUF_LEN(oper));
1203						ippp = (uint32_t *) (ph + 1);
1204						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1205						ippp++;
1206						*ippp = chk->rec.data.TSN_seq;
1207						ippp++;
1208						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1209
1210					}
1211					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1212					sctp_abort_an_association(stcb->sctp_ep,
1213					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1214					*abort_flag = 1;
1215					return;
1216				}
1217				if (chk->rec.data.stream_number !=
1218				    prev->rec.data.stream_number) {
1219					/*
1220					 * Huh, need the correct STR here,
1221					 * they must be the same.
1222					 */
1223					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1224					    chk->rec.data.stream_number,
1225					    prev->rec.data.stream_number);
1226					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1227					    0, M_DONTWAIT, 1, MT_DATA);
1228					if (oper) {
1229						struct sctp_paramhdr *ph;
1230						uint32_t *ippp;
1231
1232						SCTP_BUF_LEN(oper) =
1233						    sizeof(struct sctp_paramhdr) +
1234						    (3 * sizeof(uint32_t));
1235						ph = mtod(oper,
1236						    struct sctp_paramhdr *);
1237						ph->param_type =
1238						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1239						ph->param_length =
1240						    htons(SCTP_BUF_LEN(oper));
1241						ippp = (uint32_t *) (ph + 1);
1242						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1243						ippp++;
1244						*ippp = chk->rec.data.TSN_seq;
1245						ippp++;
1246						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1247					}
1248					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1249					sctp_abort_an_association(stcb->sctp_ep,
1250					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1251
1252					*abort_flag = 1;
1253					return;
1254				}
1255				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1256				    chk->rec.data.stream_seq !=
1257				    prev->rec.data.stream_seq) {
1258					/*
1259					 * Huh, need the correct STR here,
1260					 * they must be the same.
1261					 */
1262					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1263					    chk->rec.data.stream_seq,
1264					    prev->rec.data.stream_seq);
1265					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1266					    0, M_DONTWAIT, 1, MT_DATA);
1267					if (oper) {
1268						struct sctp_paramhdr *ph;
1269						uint32_t *ippp;
1270
1271						SCTP_BUF_LEN(oper) =
1272						    sizeof(struct sctp_paramhdr) +
1273						    (3 * sizeof(uint32_t));
1274						ph = mtod(oper,
1275						    struct sctp_paramhdr *);
1276						ph->param_type =
1277						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1278						ph->param_length =
1279						    htons(SCTP_BUF_LEN(oper));
1280						ippp = (uint32_t *) (ph + 1);
1281						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1282						ippp++;
1283						*ippp = chk->rec.data.TSN_seq;
1284						ippp++;
1285						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1286					}
1287					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1288					sctp_abort_an_association(stcb->sctp_ep,
1289					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1290
1291					*abort_flag = 1;
1292					return;
1293				}
1294			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1295			    SCTP_DATA_LAST_FRAG) {
1296				/* Insert chk MUST be a FIRST */
1297				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1298				    SCTP_DATA_FIRST_FRAG) {
1299					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1300					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1301					    0, M_DONTWAIT, 1, MT_DATA);
1302					if (oper) {
1303						struct sctp_paramhdr *ph;
1304						uint32_t *ippp;
1305
1306						SCTP_BUF_LEN(oper) =
1307						    sizeof(struct sctp_paramhdr) +
1308						    (3 * sizeof(uint32_t));
1309						ph = mtod(oper,
1310						    struct sctp_paramhdr *);
1311						ph->param_type =
1312						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1313						ph->param_length =
1314						    htons(SCTP_BUF_LEN(oper));
1315						ippp = (uint32_t *) (ph + 1);
1316						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1317						ippp++;
1318						*ippp = chk->rec.data.TSN_seq;
1319						ippp++;
1320						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1321
1322					}
1323					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1324					sctp_abort_an_association(stcb->sctp_ep,
1325					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1326
1327					*abort_flag = 1;
1328					return;
1329				}
1330			}
1331		}
1332	}
1333	if (next) {
1334		post_tsn = chk->rec.data.TSN_seq + 1;
1335		if (post_tsn == next->rec.data.TSN_seq) {
1336			/*
1337			 * Ok the one I am inserting ahead of is my NEXT
1338			 * one. A bit of valdiation here.
1339			 */
1340			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1341				/* Insert chk MUST be a last fragment */
1342				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1343				    != SCTP_DATA_LAST_FRAG) {
1344					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1345					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1346					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1347					    0, M_DONTWAIT, 1, MT_DATA);
1348					if (oper) {
1349						struct sctp_paramhdr *ph;
1350						uint32_t *ippp;
1351
1352						SCTP_BUF_LEN(oper) =
1353						    sizeof(struct sctp_paramhdr) +
1354						    (3 * sizeof(uint32_t));
1355						ph = mtod(oper,
1356						    struct sctp_paramhdr *);
1357						ph->param_type =
1358						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1359						ph->param_length =
1360						    htons(SCTP_BUF_LEN(oper));
1361						ippp = (uint32_t *) (ph + 1);
1362						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1363						ippp++;
1364						*ippp = chk->rec.data.TSN_seq;
1365						ippp++;
1366						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1367					}
1368					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1369					sctp_abort_an_association(stcb->sctp_ep,
1370					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1371
1372					*abort_flag = 1;
1373					return;
1374				}
1375			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1376				    SCTP_DATA_MIDDLE_FRAG ||
1377				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1378			    SCTP_DATA_LAST_FRAG) {
1379				/*
1380				 * Insert chk CAN be MIDDLE or FIRST NOT
1381				 * LAST
1382				 */
1383				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1384				    SCTP_DATA_LAST_FRAG) {
1385					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1386					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1387					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1388					    0, M_DONTWAIT, 1, MT_DATA);
1389					if (oper) {
1390						struct sctp_paramhdr *ph;
1391						uint32_t *ippp;
1392
1393						SCTP_BUF_LEN(oper) =
1394						    sizeof(struct sctp_paramhdr) +
1395						    (3 * sizeof(uint32_t));
1396						ph = mtod(oper,
1397						    struct sctp_paramhdr *);
1398						ph->param_type =
1399						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1400						ph->param_length =
1401						    htons(SCTP_BUF_LEN(oper));
1402						ippp = (uint32_t *) (ph + 1);
1403						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1404						ippp++;
1405						*ippp = chk->rec.data.TSN_seq;
1406						ippp++;
1407						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1408
1409					}
1410					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1411					sctp_abort_an_association(stcb->sctp_ep,
1412					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1413
1414					*abort_flag = 1;
1415					return;
1416				}
1417				if (chk->rec.data.stream_number !=
1418				    next->rec.data.stream_number) {
1419					/*
1420					 * Huh, need the correct STR here,
1421					 * they must be the same.
1422					 */
1423					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1424					    chk->rec.data.stream_number,
1425					    next->rec.data.stream_number);
1426					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1427					    0, M_DONTWAIT, 1, MT_DATA);
1428					if (oper) {
1429						struct sctp_paramhdr *ph;
1430						uint32_t *ippp;
1431
1432						SCTP_BUF_LEN(oper) =
1433						    sizeof(struct sctp_paramhdr) +
1434						    (3 * sizeof(uint32_t));
1435						ph = mtod(oper,
1436						    struct sctp_paramhdr *);
1437						ph->param_type =
1438						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1439						ph->param_length =
1440						    htons(SCTP_BUF_LEN(oper));
1441						ippp = (uint32_t *) (ph + 1);
1442						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1443						ippp++;
1444						*ippp = chk->rec.data.TSN_seq;
1445						ippp++;
1446						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1447
1448					}
1449					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1450					sctp_abort_an_association(stcb->sctp_ep,
1451					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1452
1453					*abort_flag = 1;
1454					return;
1455				}
1456				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1457				    chk->rec.data.stream_seq !=
1458				    next->rec.data.stream_seq) {
1459					/*
1460					 * Huh, need the correct STR here,
1461					 * they must be the same.
1462					 */
1463					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1464					    chk->rec.data.stream_seq,
1465					    next->rec.data.stream_seq);
1466					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1467					    0, M_DONTWAIT, 1, MT_DATA);
1468					if (oper) {
1469						struct sctp_paramhdr *ph;
1470						uint32_t *ippp;
1471
1472						SCTP_BUF_LEN(oper) =
1473						    sizeof(struct sctp_paramhdr) +
1474						    (3 * sizeof(uint32_t));
1475						ph = mtod(oper,
1476						    struct sctp_paramhdr *);
1477						ph->param_type =
1478						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1479						ph->param_length =
1480						    htons(SCTP_BUF_LEN(oper));
1481						ippp = (uint32_t *) (ph + 1);
1482						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1483						ippp++;
1484						*ippp = chk->rec.data.TSN_seq;
1485						ippp++;
1486						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1487					}
1488					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1489					sctp_abort_an_association(stcb->sctp_ep,
1490					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1491
1492					*abort_flag = 1;
1493					return;
1494				}
1495			}
1496		}
1497	}
1498	/* Do we need to do some delivery? check */
1499	sctp_deliver_reasm_check(stcb, asoc);
1500}
1501
1502/*
1503 * This is an unfortunate routine. It checks to make sure a evil guy is not
1504 * stuffing us full of bad packet fragments. A broken peer could also do this
1505 * but this is doubtful. It is to bad I must worry about evil crackers sigh
1506 * :< more cycles.
1507 */
1508static int
1509sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1510    uint32_t TSN_seq)
1511{
1512	struct sctp_tmit_chunk *at;
1513	uint32_t tsn_est;
1514
1515	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1516		if (compare_with_wrap(TSN_seq,
1517		    at->rec.data.TSN_seq, MAX_TSN)) {
1518			/* is it one bigger? */
1519			tsn_est = at->rec.data.TSN_seq + 1;
1520			if (tsn_est == TSN_seq) {
1521				/* yep. It better be a last then */
1522				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1523				    SCTP_DATA_LAST_FRAG) {
1524					/*
1525					 * Ok this guy belongs next to a guy
1526					 * that is NOT last, it should be a
1527					 * middle/last, not a complete
1528					 * chunk.
1529					 */
1530					return (1);
1531				} else {
1532					/*
1533					 * This guy is ok since its a LAST
1534					 * and the new chunk is a fully
1535					 * self- contained one.
1536					 */
1537					return (0);
1538				}
1539			}
1540		} else if (TSN_seq == at->rec.data.TSN_seq) {
1541			/* Software error since I have a dup? */
1542			return (1);
1543		} else {
1544			/*
1545			 * Ok, 'at' is larger than new chunk but does it
1546			 * need to be right before it.
1547			 */
1548			tsn_est = TSN_seq + 1;
1549			if (tsn_est == at->rec.data.TSN_seq) {
1550				/* Yep, It better be a first */
1551				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1552				    SCTP_DATA_FIRST_FRAG) {
1553					return (1);
1554				} else {
1555					return (0);
1556				}
1557			}
1558		}
1559	}
1560	return (0);
1561}
1562
1563
1564static int
1565sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1566    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1567    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1568    int *break_flag, int last_chunk)
1569{
1570	/* Process a data chunk */
1571	/* struct sctp_tmit_chunk *chk; */
1572	struct sctp_tmit_chunk *chk;
1573	uint32_t tsn, gap;
1574
1575	/* EY - for nr_sack */
1576	uint32_t nr_gap;
1577	struct mbuf *dmbuf;
1578	int indx, the_len;
1579	int need_reasm_check = 0;
1580	uint16_t strmno, strmseq;
1581	struct mbuf *oper;
1582	struct sctp_queued_to_read *control;
1583	int ordered;
1584	uint32_t protocol_id;
1585	uint8_t chunk_flags;
1586	struct sctp_stream_reset_list *liste;
1587
1588	chk = NULL;
1589	tsn = ntohl(ch->dp.tsn);
1590	chunk_flags = ch->ch.chunk_flags;
1591	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1592		asoc->send_sack = 1;
1593	}
1594	protocol_id = ch->dp.protocol_id;
1595	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1596	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1597		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1598	}
1599	if (stcb == NULL) {
1600		return (0);
1601	}
1602	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1603	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1604	    asoc->cumulative_tsn == tsn) {
1605		/* It is a duplicate */
1606		SCTP_STAT_INCR(sctps_recvdupdata);
1607		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1608			/* Record a dup for the next outbound sack */
1609			asoc->dup_tsns[asoc->numduptsns] = tsn;
1610			asoc->numduptsns++;
1611		}
1612		asoc->send_sack = 1;
1613		return (0);
1614	}
1615	/* Calculate the number of TSN's between the base and this TSN */
1616	if (tsn >= asoc->mapping_array_base_tsn) {
1617		gap = tsn - asoc->mapping_array_base_tsn;
1618	} else {
1619		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1620	}
1621	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1622		/* Can't hold the bit in the mapping at max array, toss it */
1623		return (0);
1624	}
1625	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1626		SCTP_TCB_LOCK_ASSERT(stcb);
1627		if (sctp_expand_mapping_array(asoc, gap)) {
1628			/* Can't expand, drop it */
1629			return (0);
1630		}
1631	}
1632	/* EY - for nr_sack */
1633	nr_gap = gap;
1634
1635	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1636		*high_tsn = tsn;
1637	}
1638	/* See if we have received this one already */
1639	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1640		SCTP_STAT_INCR(sctps_recvdupdata);
1641		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1642			/* Record a dup for the next outbound sack */
1643			asoc->dup_tsns[asoc->numduptsns] = tsn;
1644			asoc->numduptsns++;
1645		}
1646		asoc->send_sack = 1;
1647		return (0);
1648	}
1649	/*
1650	 * Check to see about the GONE flag, duplicates would cause a sack
1651	 * to be sent up above
1652	 */
1653	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1654	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1655	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1656	    ) {
1657		/*
1658		 * wait a minute, this guy is gone, there is no longer a
1659		 * receiver. Send peer an ABORT!
1660		 */
1661		struct mbuf *op_err;
1662
1663		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1664		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1665		*abort_flag = 1;
1666		return (0);
1667	}
1668	/*
1669	 * Now before going further we see if there is room. If NOT then we
1670	 * MAY let one through only IF this TSN is the one we are waiting
1671	 * for on a partial delivery API.
1672	 */
1673
1674	/* now do the tests */
1675	if (((asoc->cnt_on_all_streams +
1676	    asoc->cnt_on_reasm_queue +
1677	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1678	    (((int)asoc->my_rwnd) <= 0)) {
1679		/*
1680		 * When we have NO room in the rwnd we check to make sure
1681		 * the reader is doing its job...
1682		 */
1683		if (stcb->sctp_socket->so_rcv.sb_cc) {
1684			/* some to read, wake-up */
1685#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1686			struct socket *so;
1687
1688			so = SCTP_INP_SO(stcb->sctp_ep);
1689			atomic_add_int(&stcb->asoc.refcnt, 1);
1690			SCTP_TCB_UNLOCK(stcb);
1691			SCTP_SOCKET_LOCK(so, 1);
1692			SCTP_TCB_LOCK(stcb);
1693			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1694			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1695				/* assoc was freed while we were unlocked */
1696				SCTP_SOCKET_UNLOCK(so, 1);
1697				return (0);
1698			}
1699#endif
1700			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1701#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1702			SCTP_SOCKET_UNLOCK(so, 1);
1703#endif
1704		}
1705		/* now is it in the mapping array of what we have accepted? */
1706		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1707			/* Nope not in the valid range dump it */
1708			sctp_set_rwnd(stcb, asoc);
1709			if ((asoc->cnt_on_all_streams +
1710			    asoc->cnt_on_reasm_queue +
1711			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1712				SCTP_STAT_INCR(sctps_datadropchklmt);
1713			} else {
1714				SCTP_STAT_INCR(sctps_datadroprwnd);
1715			}
1716			indx = *break_flag;
1717			*break_flag = 1;
1718			return (0);
1719		}
1720	}
1721	strmno = ntohs(ch->dp.stream_id);
1722	if (strmno >= asoc->streamincnt) {
1723		struct sctp_paramhdr *phdr;
1724		struct mbuf *mb;
1725
1726		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1727		    0, M_DONTWAIT, 1, MT_DATA);
1728		if (mb != NULL) {
1729			/* add some space up front so prepend will work well */
1730			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1731			phdr = mtod(mb, struct sctp_paramhdr *);
1732			/*
1733			 * Error causes are just param's and this one has
1734			 * two back to back phdr, one with the error type
1735			 * and size, the other with the streamid and a rsvd
1736			 */
1737			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1738			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1739			phdr->param_length =
1740			    htons(sizeof(struct sctp_paramhdr) * 2);
1741			phdr++;
1742			/* We insert the stream in the type field */
1743			phdr->param_type = ch->dp.stream_id;
1744			/* And set the length to 0 for the rsvd field */
1745			phdr->param_length = 0;
1746			sctp_queue_op_err(stcb, mb);
1747		}
1748		SCTP_STAT_INCR(sctps_badsid);
1749		SCTP_TCB_LOCK_ASSERT(stcb);
1750		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1751		/* EY set this tsn present in  nr_sack's nr_mapping_array */
1752		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1753			SCTP_TCB_LOCK_ASSERT(stcb);
1754			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1755		}
1756		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1757			/* we have a new high score */
1758			asoc->highest_tsn_inside_map = tsn;
1759			/* EY nr_sack version of the above */
1760			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1761				asoc->highest_tsn_inside_nr_map = tsn;
1762			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1763				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1764			}
1765		}
1766		if (tsn == (asoc->cumulative_tsn + 1)) {
1767			/* Update cum-ack */
1768			asoc->cumulative_tsn = tsn;
1769		}
1770		return (0);
1771	}
1772	/*
1773	 * Before we continue lets validate that we are not being fooled by
1774	 * an evil attacker. We can only have 4k chunks based on our TSN
1775	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1776	 * way our stream sequence numbers could have wrapped. We of course
1777	 * only validate the FIRST fragment so the bit must be set.
1778	 */
1779	strmseq = ntohs(ch->dp.stream_sequence);
1780#ifdef SCTP_ASOCLOG_OF_TSNS
1781	SCTP_TCB_LOCK_ASSERT(stcb);
1782	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1783		asoc->tsn_in_at = 0;
1784		asoc->tsn_in_wrapped = 1;
1785	}
1786	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1787	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1788	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1789	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1790	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1791	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1792	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1793	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1794	asoc->tsn_in_at++;
1795#endif
1796	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1797	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1798	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1799	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1800	    strmseq, MAX_SEQ) ||
1801	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1802		/* The incoming sseq is behind where we last delivered? */
1803		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1804		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1805		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1806		    0, M_DONTWAIT, 1, MT_DATA);
1807		if (oper) {
1808			struct sctp_paramhdr *ph;
1809			uint32_t *ippp;
1810
1811			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1812			    (3 * sizeof(uint32_t));
1813			ph = mtod(oper, struct sctp_paramhdr *);
1814			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1815			ph->param_length = htons(SCTP_BUF_LEN(oper));
1816			ippp = (uint32_t *) (ph + 1);
1817			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1818			ippp++;
1819			*ippp = tsn;
1820			ippp++;
1821			*ippp = ((strmno << 16) | strmseq);
1822
1823		}
1824		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1825		sctp_abort_an_association(stcb->sctp_ep, stcb,
1826		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1827		*abort_flag = 1;
1828		return (0);
1829	}
1830	/************************************
1831	 * From here down we may find ch-> invalid
1832	 * so its a good idea NOT to use it.
1833	 *************************************/
1834
1835	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1836	if (last_chunk == 0) {
1837		dmbuf = SCTP_M_COPYM(*m,
1838		    (offset + sizeof(struct sctp_data_chunk)),
1839		    the_len, M_DONTWAIT);
1840#ifdef SCTP_MBUF_LOGGING
1841		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1842			struct mbuf *mat;
1843
1844			mat = dmbuf;
1845			while (mat) {
1846				if (SCTP_BUF_IS_EXTENDED(mat)) {
1847					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1848				}
1849				mat = SCTP_BUF_NEXT(mat);
1850			}
1851		}
1852#endif
1853	} else {
1854		/* We can steal the last chunk */
1855		int l_len;
1856
1857		dmbuf = *m;
1858		/* lop off the top part */
1859		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1860		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1861			l_len = SCTP_BUF_LEN(dmbuf);
1862		} else {
1863			/*
1864			 * need to count up the size hopefully does not hit
1865			 * this to often :-0
1866			 */
1867			struct mbuf *lat;
1868
1869			l_len = 0;
1870			lat = dmbuf;
1871			while (lat) {
1872				l_len += SCTP_BUF_LEN(lat);
1873				lat = SCTP_BUF_NEXT(lat);
1874			}
1875		}
1876		if (l_len > the_len) {
1877			/* Trim the end round bytes off  too */
1878			m_adj(dmbuf, -(l_len - the_len));
1879		}
1880	}
1881	if (dmbuf == NULL) {
1882		SCTP_STAT_INCR(sctps_nomem);
1883		return (0);
1884	}
1885	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1886	    asoc->fragmented_delivery_inprogress == 0 &&
1887	    TAILQ_EMPTY(&asoc->resetHead) &&
1888	    ((ordered == 0) ||
1889	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1890	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1891		/* Candidate for express delivery */
1892		/*
1893		 * Its not fragmented, No PD-API is up, Nothing in the
1894		 * delivery queue, Its un-ordered OR ordered and the next to
1895		 * deliver AND nothing else is stuck on the stream queue,
1896		 * And there is room for it in the socket buffer. Lets just
1897		 * stuff it up the buffer....
1898		 */
1899
1900		/* It would be nice to avoid this copy if we could :< */
1901		sctp_alloc_a_readq(stcb, control);
1902		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1903		    protocol_id,
1904		    stcb->asoc.context,
1905		    strmno, strmseq,
1906		    chunk_flags,
1907		    dmbuf);
1908		if (control == NULL) {
1909			goto failed_express_del;
1910		}
1911		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1912
1913		/*
1914		 * EY here I should check if this delivered tsn is
1915		 * out_of_order, if yes then update the nr_map
1916		 */
1917		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1918			/*
1919			 * EY check if the mapping_array and nr_mapping
1920			 * array are consistent
1921			 */
1922			if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
1923				/*
1924				 * printf("EY-IN
1925				 * sctp_process_a_data_chunk(5): Something
1926				 * is wrong the map base tsn" "\nEY-and
1927				 * nr_map base tsn should be equal.");
1928				 */
1929				/* EY debugging block */
1930			{
1931				/*
1932				 * printf("\nEY-Calculating an
1933				 * nr_gap!!\nmapping_array_size = %d
1934				 * nr_mapping_array_size = %d"
1935				 * "\nEY-mapping_array_base = %d
1936				 * nr_mapping_array_base =
1937				 * %d\nEY-highest_tsn_inside_map = %d"
1938				 * "highest_tsn_inside_nr_map = %d\nEY-TSN =
1939				 * %d nr_gap = %d",asoc->mapping_array_size,
1940				 * asoc->nr_mapping_array_size,
1941				 * asoc->mapping_array_base_tsn,
1942				 * asoc->nr_mapping_array_base_tsn,
1943				 * asoc->highest_tsn_inside_map,
1944				 * asoc->highest_tsn_inside_nr_map,tsn,nr_gap
1945				 * );
1946				 */
1947			}
1948			/* EY - not %100 sure about the lock thing */
1949			SCTP_TCB_LOCK_ASSERT(stcb);
1950			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1951			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1952				asoc->highest_tsn_inside_nr_map = tsn;
1953		}
1954		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1955			/* for ordered, bump what we delivered */
1956			asoc->strmin[strmno].last_sequence_delivered++;
1957		}
1958		SCTP_STAT_INCR(sctps_recvexpress);
1959		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1960			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1961			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1962		}
1963		control = NULL;
1964		goto finish_express_del;
1965	}
1966failed_express_del:
1967	/* If we reach here this is a new chunk */
1968	chk = NULL;
1969	control = NULL;
1970	/* Express for fragmented delivery? */
1971	if ((asoc->fragmented_delivery_inprogress) &&
1972	    (stcb->asoc.control_pdapi) &&
1973	    (asoc->str_of_pdapi == strmno) &&
1974	    (asoc->ssn_of_pdapi == strmseq)
1975	    ) {
1976		control = stcb->asoc.control_pdapi;
1977		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1978			/* Can't be another first? */
1979			goto failed_pdapi_express_del;
1980		}
1981		if (tsn == (control->sinfo_tsn + 1)) {
1982			/* Yep, we can add it on */
1983			int end = 0;
1984			uint32_t cumack;
1985
1986			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1987				end = 1;
1988			}
1989			cumack = asoc->cumulative_tsn;
1990			if ((cumack + 1) == tsn)
1991				cumack = tsn;
1992
1993			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1994			    tsn,
1995			    &stcb->sctp_socket->so_rcv)) {
1996				SCTP_PRINTF("Append fails end:%d\n", end);
1997				goto failed_pdapi_express_del;
1998			}
1999			/*
2000			 * EY It is appended to the read queue in prev if
2001			 * block here I should check if this delivered tsn
2002			 * is out_of_order, if yes then update the nr_map
2003			 */
2004			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2005				/* EY debugging block */
2006				{
2007					/*
2008					 * printf("\nEY-Calculating an
2009					 * nr_gap!!\nEY-mapping_array_size =
2010					 * %d nr_mapping_array_size = %d"
2011					 * "\nEY-mapping_array_base = %d
2012					 * nr_mapping_array_base =
2013					 * %d\nEY-highest_tsn_inside_map =
2014					 * %d" "highest_tsn_inside_nr_map =
2015					 * %d\nEY-TSN = %d nr_gap =
2016					 * %d",asoc->mapping_array_size,
2017					 * asoc->nr_mapping_array_size,
2018					 * asoc->mapping_array_base_tsn,
2019					 * asoc->nr_mapping_array_base_tsn,
2020					 * asoc->highest_tsn_inside_map,
2021					 * asoc->highest_tsn_inside_nr_map,ts
2022					 * n,nr_gap);
2023					 */
2024				}
2025				/* EY - not %100 sure about the lock thing */
2026				SCTP_TCB_LOCK_ASSERT(stcb);
2027				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2028				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2029					asoc->highest_tsn_inside_nr_map = tsn;
2030			}
2031			SCTP_STAT_INCR(sctps_recvexpressm);
2032			control->sinfo_tsn = tsn;
2033			asoc->tsn_last_delivered = tsn;
2034			asoc->fragment_flags = chunk_flags;
2035			asoc->tsn_of_pdapi_last_delivered = tsn;
2036			asoc->last_flags_delivered = chunk_flags;
2037			asoc->last_strm_seq_delivered = strmseq;
2038			asoc->last_strm_no_delivered = strmno;
2039			if (end) {
2040				/* clean up the flags and such */
2041				asoc->fragmented_delivery_inprogress = 0;
2042				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2043					asoc->strmin[strmno].last_sequence_delivered++;
2044				}
2045				stcb->asoc.control_pdapi = NULL;
2046				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2047					/*
2048					 * There could be another message
2049					 * ready
2050					 */
2051					need_reasm_check = 1;
2052				}
2053			}
2054			control = NULL;
2055			goto finish_express_del;
2056		}
2057	}
2058failed_pdapi_express_del:
2059	control = NULL;
2060	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2061		sctp_alloc_a_chunk(stcb, chk);
2062		if (chk == NULL) {
2063			/* No memory so we drop the chunk */
2064			SCTP_STAT_INCR(sctps_nomem);
2065			if (last_chunk == 0) {
2066				/* we copied it, free the copy */
2067				sctp_m_freem(dmbuf);
2068			}
2069			return (0);
2070		}
2071		chk->rec.data.TSN_seq = tsn;
2072		chk->no_fr_allowed = 0;
2073		chk->rec.data.stream_seq = strmseq;
2074		chk->rec.data.stream_number = strmno;
2075		chk->rec.data.payloadtype = protocol_id;
2076		chk->rec.data.context = stcb->asoc.context;
2077		chk->rec.data.doing_fast_retransmit = 0;
2078		chk->rec.data.rcv_flags = chunk_flags;
2079		chk->asoc = asoc;
2080		chk->send_size = the_len;
2081		chk->whoTo = net;
2082		atomic_add_int(&net->ref_count, 1);
2083		chk->data = dmbuf;
2084	} else {
2085		sctp_alloc_a_readq(stcb, control);
2086		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2087		    protocol_id,
2088		    stcb->asoc.context,
2089		    strmno, strmseq,
2090		    chunk_flags,
2091		    dmbuf);
2092		if (control == NULL) {
2093			/* No memory so we drop the chunk */
2094			SCTP_STAT_INCR(sctps_nomem);
2095			if (last_chunk == 0) {
2096				/* we copied it, free the copy */
2097				sctp_m_freem(dmbuf);
2098			}
2099			return (0);
2100		}
2101		control->length = the_len;
2102	}
2103
2104	/* Mark it as received */
2105	/* Now queue it where it belongs */
2106	if (control != NULL) {
2107		/* First a sanity check */
2108		if (asoc->fragmented_delivery_inprogress) {
2109			/*
2110			 * Ok, we have a fragmented delivery in progress if
2111			 * this chunk is next to deliver OR belongs in our
2112			 * view to the reassembly, the peer is evil or
2113			 * broken.
2114			 */
2115			uint32_t estimate_tsn;
2116
2117			estimate_tsn = asoc->tsn_last_delivered + 1;
2118			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2119			    (estimate_tsn == control->sinfo_tsn)) {
2120				/* Evil/Broke peer */
2121				sctp_m_freem(control->data);
2122				control->data = NULL;
2123				if (control->whoFrom) {
2124					sctp_free_remote_addr(control->whoFrom);
2125					control->whoFrom = NULL;
2126				}
2127				sctp_free_a_readq(stcb, control);
2128				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2129				    0, M_DONTWAIT, 1, MT_DATA);
2130				if (oper) {
2131					struct sctp_paramhdr *ph;
2132					uint32_t *ippp;
2133
2134					SCTP_BUF_LEN(oper) =
2135					    sizeof(struct sctp_paramhdr) +
2136					    (3 * sizeof(uint32_t));
2137					ph = mtod(oper, struct sctp_paramhdr *);
2138					ph->param_type =
2139					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2140					ph->param_length = htons(SCTP_BUF_LEN(oper));
2141					ippp = (uint32_t *) (ph + 1);
2142					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2143					ippp++;
2144					*ippp = tsn;
2145					ippp++;
2146					*ippp = ((strmno << 16) | strmseq);
2147				}
2148				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2149				sctp_abort_an_association(stcb->sctp_ep, stcb,
2150				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2151
2152				*abort_flag = 1;
2153				return (0);
2154			} else {
2155				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2156					sctp_m_freem(control->data);
2157					control->data = NULL;
2158					if (control->whoFrom) {
2159						sctp_free_remote_addr(control->whoFrom);
2160						control->whoFrom = NULL;
2161					}
2162					sctp_free_a_readq(stcb, control);
2163
2164					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2165					    0, M_DONTWAIT, 1, MT_DATA);
2166					if (oper) {
2167						struct sctp_paramhdr *ph;
2168						uint32_t *ippp;
2169
2170						SCTP_BUF_LEN(oper) =
2171						    sizeof(struct sctp_paramhdr) +
2172						    (3 * sizeof(uint32_t));
2173						ph = mtod(oper,
2174						    struct sctp_paramhdr *);
2175						ph->param_type =
2176						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2177						ph->param_length =
2178						    htons(SCTP_BUF_LEN(oper));
2179						ippp = (uint32_t *) (ph + 1);
2180						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2181						ippp++;
2182						*ippp = tsn;
2183						ippp++;
2184						*ippp = ((strmno << 16) | strmseq);
2185					}
2186					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2187					sctp_abort_an_association(stcb->sctp_ep,
2188					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2189
2190					*abort_flag = 1;
2191					return (0);
2192				}
2193			}
2194		} else {
2195			/* No PDAPI running */
2196			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2197				/*
2198				 * Reassembly queue is NOT empty validate
2199				 * that this tsn does not need to be in
2200				 * reasembly queue. If it does then our peer
2201				 * is broken or evil.
2202				 */
2203				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2204					sctp_m_freem(control->data);
2205					control->data = NULL;
2206					if (control->whoFrom) {
2207						sctp_free_remote_addr(control->whoFrom);
2208						control->whoFrom = NULL;
2209					}
2210					sctp_free_a_readq(stcb, control);
2211					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2212					    0, M_DONTWAIT, 1, MT_DATA);
2213					if (oper) {
2214						struct sctp_paramhdr *ph;
2215						uint32_t *ippp;
2216
2217						SCTP_BUF_LEN(oper) =
2218						    sizeof(struct sctp_paramhdr) +
2219						    (3 * sizeof(uint32_t));
2220						ph = mtod(oper,
2221						    struct sctp_paramhdr *);
2222						ph->param_type =
2223						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2224						ph->param_length =
2225						    htons(SCTP_BUF_LEN(oper));
2226						ippp = (uint32_t *) (ph + 1);
2227						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2228						ippp++;
2229						*ippp = tsn;
2230						ippp++;
2231						*ippp = ((strmno << 16) | strmseq);
2232					}
2233					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2234					sctp_abort_an_association(stcb->sctp_ep,
2235					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2236
2237					*abort_flag = 1;
2238					return (0);
2239				}
2240			}
2241		}
2242		/* ok, if we reach here we have passed the sanity checks */
2243		if (chunk_flags & SCTP_DATA_UNORDERED) {
2244			/* queue directly into socket buffer */
2245			sctp_add_to_readq(stcb->sctp_ep, stcb,
2246			    control,
2247			    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2248
2249			/*
2250			 * EY It is added to the read queue in prev if block
2251			 * here I should check if this delivered tsn is
2252			 * out_of_order, if yes then update the nr_map
2253			 */
2254			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2255				/*
2256				 * EY check if the mapping_array and
2257				 * nr_mapping array are consistent
2258				 */
2259				if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
2260					/*
2261					 * printf("EY-IN
2262					 * sctp_process_a_data_chunk(6):
2263					 * Something is wrong the map base
2264					 * tsn" "\nEY-and nr_map base tsn
2265					 * should be equal.");
2266					 */
2267					/*
2268					 * EY - not %100 sure about the lock
2269					 * thing, i think we don't need the
2270					 * below,
2271					 */
2272					/* SCTP_TCB_LOCK_ASSERT(stcb); */
2273				{
2274					/*
2275					 * printf("\nEY-Calculating an
2276					 * nr_gap!!\nEY-mapping_array_size =
2277					 * %d nr_mapping_array_size = %d"
2278					 * "\nEY-mapping_array_base = %d
2279					 * nr_mapping_array_base =
2280					 * %d\nEY-highest_tsn_inside_map =
2281					 * %d" "highest_tsn_inside_nr_map =
2282					 * %d\nEY-TSN = %d nr_gap =
2283					 * %d",asoc->mapping_array_size,
2284					 * asoc->nr_mapping_array_size,
2285					 * asoc->mapping_array_base_tsn,
2286					 * asoc->nr_mapping_array_base_tsn,
2287					 * asoc->highest_tsn_inside_map,
2288					 * asoc->highest_tsn_inside_nr_map,ts
2289					 * n,nr_gap);
2290					 */
2291				}
2292				SCTP_TCB_LOCK_ASSERT(stcb);
2293				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2294				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2295					asoc->highest_tsn_inside_nr_map = tsn;
2296			}
2297		} else {
2298			/*
2299			 * Special check for when streams are resetting. We
2300			 * could be more smart about this and check the
2301			 * actual stream to see if it is not being reset..
2302			 * that way we would not create a HOLB when amongst
2303			 * streams being reset and those not being reset.
2304			 *
2305			 * We take complete messages that have a stream reset
2306			 * intervening (aka the TSN is after where our
2307			 * cum-ack needs to be) off and put them on a
2308			 * pending_reply_queue. The reassembly ones we do
2309			 * not have to worry about since they are all sorted
2310			 * and proceessed by TSN order. It is only the
2311			 * singletons I must worry about.
2312			 */
2313			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2314			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2315			    ) {
2316				/*
2317				 * yep its past where we need to reset... go
2318				 * ahead and queue it.
2319				 */
2320				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2321					/* first one on */
2322					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2323				} else {
2324					struct sctp_queued_to_read *ctlOn;
2325					unsigned char inserted = 0;
2326
2327					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2328					while (ctlOn) {
2329						if (compare_with_wrap(control->sinfo_tsn,
2330						    ctlOn->sinfo_tsn, MAX_TSN)) {
2331							ctlOn = TAILQ_NEXT(ctlOn, next);
2332						} else {
2333							/* found it */
2334							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2335							inserted = 1;
2336							break;
2337						}
2338					}
2339					if (inserted == 0) {
2340						/*
2341						 * must be put at end, use
2342						 * prevP (all setup from
2343						 * loop) to setup nextP.
2344						 */
2345						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2346					}
2347				}
2348			} else {
2349				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2350				if (*abort_flag) {
2351					return (0);
2352				}
2353			}
2354		}
2355	} else {
2356		/* Into the re-assembly queue */
2357		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2358		if (*abort_flag) {
2359			/*
2360			 * the assoc is now gone and chk was put onto the
2361			 * reasm queue, which has all been freed.
2362			 */
2363			*m = NULL;
2364			return (0);
2365		}
2366	}
2367finish_express_del:
2368	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2369		/* we have a new high score */
2370		asoc->highest_tsn_inside_map = tsn;
2371		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2372			sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2373		}
2374	}
2375	if (tsn == (asoc->cumulative_tsn + 1)) {
2376		/* Update cum-ack */
2377		asoc->cumulative_tsn = tsn;
2378	}
2379	if (last_chunk) {
2380		*m = NULL;
2381	}
2382	if (ordered) {
2383		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2384	} else {
2385		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2386	}
2387	SCTP_STAT_INCR(sctps_recvdata);
2388	/* Set it present please */
2389	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2390		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2391	}
2392	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2393		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2394		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2395	}
2396	SCTP_TCB_LOCK_ASSERT(stcb);
2397	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2398
2399	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
2400	    asoc->peer_supports_nr_sack &&
2401	    (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
2402		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2403		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2404			asoc->highest_tsn_inside_nr_map = tsn;
2405		}
2406	}
2407	/* check the special flag for stream resets */
2408	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2409	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2410	    (asoc->cumulative_tsn == liste->tsn))
2411	    ) {
2412		/*
2413		 * we have finished working through the backlogged TSN's now
2414		 * time to reset streams. 1: call reset function. 2: free
2415		 * pending_reply space 3: distribute any chunks in
2416		 * pending_reply_queue.
2417		 */
2418		struct sctp_queued_to_read *ctl;
2419
2420		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2421		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2422		SCTP_FREE(liste, SCTP_M_STRESET);
2423		/* sa_ignore FREED_MEMORY */
2424		liste = TAILQ_FIRST(&asoc->resetHead);
2425		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2426		if (ctl && (liste == NULL)) {
2427			/* All can be removed */
2428			while (ctl) {
2429				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2430				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2431				if (*abort_flag) {
2432					return (0);
2433				}
2434				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2435			}
2436		} else if (ctl) {
2437			/* more than one in queue */
2438			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2439				/*
2440				 * if ctl->sinfo_tsn is <= liste->tsn we can
2441				 * process it which is the NOT of
2442				 * ctl->sinfo_tsn > liste->tsn
2443				 */
2444				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2445				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2446				if (*abort_flag) {
2447					return (0);
2448				}
2449				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2450			}
2451		}
2452		/*
2453		 * Now service re-assembly to pick up anything that has been
2454		 * held on reassembly queue?
2455		 */
2456		sctp_deliver_reasm_check(stcb, asoc);
2457		need_reasm_check = 0;
2458	}
2459	if (need_reasm_check) {
2460		/* Another one waits ? */
2461		sctp_deliver_reasm_check(stcb, asoc);
2462	}
2463	return (1);
2464}
2465
2466int8_t sctp_map_lookup_tab[256] = {
2467	-1, 0, -1, 1, -1, 0, -1, 2,
2468	-1, 0, -1, 1, -1, 0, -1, 3,
2469	-1, 0, -1, 1, -1, 0, -1, 2,
2470	-1, 0, -1, 1, -1, 0, -1, 4,
2471	-1, 0, -1, 1, -1, 0, -1, 2,
2472	-1, 0, -1, 1, -1, 0, -1, 3,
2473	-1, 0, -1, 1, -1, 0, -1, 2,
2474	-1, 0, -1, 1, -1, 0, -1, 5,
2475	-1, 0, -1, 1, -1, 0, -1, 2,
2476	-1, 0, -1, 1, -1, 0, -1, 3,
2477	-1, 0, -1, 1, -1, 0, -1, 2,
2478	-1, 0, -1, 1, -1, 0, -1, 4,
2479	-1, 0, -1, 1, -1, 0, -1, 2,
2480	-1, 0, -1, 1, -1, 0, -1, 3,
2481	-1, 0, -1, 1, -1, 0, -1, 2,
2482	-1, 0, -1, 1, -1, 0, -1, 6,
2483	-1, 0, -1, 1, -1, 0, -1, 2,
2484	-1, 0, -1, 1, -1, 0, -1, 3,
2485	-1, 0, -1, 1, -1, 0, -1, 2,
2486	-1, 0, -1, 1, -1, 0, -1, 4,
2487	-1, 0, -1, 1, -1, 0, -1, 2,
2488	-1, 0, -1, 1, -1, 0, -1, 3,
2489	-1, 0, -1, 1, -1, 0, -1, 2,
2490	-1, 0, -1, 1, -1, 0, -1, 5,
2491	-1, 0, -1, 1, -1, 0, -1, 2,
2492	-1, 0, -1, 1, -1, 0, -1, 3,
2493	-1, 0, -1, 1, -1, 0, -1, 2,
2494	-1, 0, -1, 1, -1, 0, -1, 4,
2495	-1, 0, -1, 1, -1, 0, -1, 2,
2496	-1, 0, -1, 1, -1, 0, -1, 3,
2497	-1, 0, -1, 1, -1, 0, -1, 2,
2498	-1, 0, -1, 1, -1, 0, -1, 7,
2499};
2500
2501
2502void
2503sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2504{
2505	/*
2506	 * Now we also need to check the mapping array in a couple of ways.
2507	 * 1) Did we move the cum-ack point?
2508	 */
2509	struct sctp_association *asoc;
2510	int at;
2511	int last_all_ones = 0;
2512	int slide_from, slide_end, lgap, distance;
2513
2514	/* EY nr_mapping array variables */
2515	/* int nr_at; */
2516	/* int nr_last_all_ones = 0; */
2517	/* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */
2518
2519	uint32_t old_cumack, old_base, old_highest;
2520	unsigned char aux_array[64];
2521
2522	/*
2523	 * EY! Don't think this is required but I am immitating the code for
2524	 * map just to make sure
2525	 */
2526	unsigned char nr_aux_array[64];
2527
2528	asoc = &stcb->asoc;
2529	at = 0;
2530
2531	old_cumack = asoc->cumulative_tsn;
2532	old_base = asoc->mapping_array_base_tsn;
2533	old_highest = asoc->highest_tsn_inside_map;
2534	if (asoc->mapping_array_size < 64)
2535		memcpy(aux_array, asoc->mapping_array,
2536		    asoc->mapping_array_size);
2537	else
2538		memcpy(aux_array, asoc->mapping_array, 64);
2539	/* EY do the same for nr_mapping_array */
2540	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2541
2542		if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
2543			/*
2544			 * printf("\nEY-IN sack_check method: \nEY-" "The
2545			 * size of map and nr_map are inconsitent")
2546			 */ ;
2547		}
2548		if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
2549			/*
2550			 * printf("\nEY-IN sack_check method VERY CRUCIAL
2551			 * error: \nEY-" "The base tsns of map and nr_map
2552			 * are inconsitent")
2553			 */ ;
2554		}
2555		/* EY! just immitating the above code */
2556		if (asoc->nr_mapping_array_size < 64)
2557			memcpy(nr_aux_array, asoc->nr_mapping_array,
2558			    asoc->nr_mapping_array_size);
2559		else
2560			memcpy(aux_array, asoc->nr_mapping_array, 64);
2561	}
2562	/*
2563	 * We could probably improve this a small bit by calculating the
2564	 * offset of the current cum-ack as the starting point.
2565	 */
2566	at = 0;
2567	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2568
2569		if (asoc->mapping_array[slide_from] == 0xff) {
2570			at += 8;
2571			last_all_ones = 1;
2572		} else {
2573			/* there is a 0 bit */
2574			at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2575			last_all_ones = 0;
2576			break;
2577		}
2578	}
2579	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2580	/* at is one off, since in the table a embedded -1 is present */
2581	at++;
2582
2583	if (compare_with_wrap(asoc->cumulative_tsn,
2584	    asoc->highest_tsn_inside_map,
2585	    MAX_TSN)) {
2586#ifdef INVARIANTS
2587		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2588		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2589#else
2590		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2591		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2592		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2593			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2594		}
2595		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2596		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2597#endif
2598	}
2599	if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2600		/* The complete array was completed by a single FR */
2601		/* higest becomes the cum-ack */
2602		int clr;
2603
2604		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2605		/* clear the array */
2606		clr = (at >> 3) + 1;
2607		if (clr > asoc->mapping_array_size) {
2608			clr = asoc->mapping_array_size;
2609		}
2610		memset(asoc->mapping_array, 0, clr);
2611		/* base becomes one ahead of the cum-ack */
2612		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2613
2614		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2615
2616			if (clr > asoc->nr_mapping_array_size)
2617				clr = asoc->nr_mapping_array_size;
2618
2619			memset(asoc->nr_mapping_array, 0, clr);
2620			/* base becomes one ahead of the cum-ack */
2621			asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2622			asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2623		}
2624		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2625			sctp_log_map(old_base, old_cumack, old_highest,
2626			    SCTP_MAP_PREPARE_SLIDE);
2627			sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2628			    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2629		}
2630	} else if (at >= 8) {
2631		/* we can slide the mapping array down */
2632		/* slide_from holds where we hit the first NON 0xff byte */
2633
2634		/*
2635		 * now calculate the ceiling of the move using our highest
2636		 * TSN value
2637		 */
2638		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2639			lgap = asoc->highest_tsn_inside_map -
2640			    asoc->mapping_array_base_tsn;
2641		} else {
2642			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2643			    asoc->highest_tsn_inside_map + 1;
2644		}
2645		slide_end = lgap >> 3;
2646		if (slide_end < slide_from) {
2647#ifdef INVARIANTS
2648			panic("impossible slide");
2649#else
2650			printf("impossible slide?\n");
2651			return;
2652#endif
2653		}
2654		if (slide_end > asoc->mapping_array_size) {
2655#ifdef INVARIANTS
2656			panic("would overrun buffer");
2657#else
2658			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2659			    asoc->mapping_array_size, slide_end);
2660			slide_end = asoc->mapping_array_size;
2661#endif
2662		}
2663		distance = (slide_end - slide_from) + 1;
2664		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2665			sctp_log_map(old_base, old_cumack, old_highest,
2666			    SCTP_MAP_PREPARE_SLIDE);
2667			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2668			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2669		}
2670		if (distance + slide_from > asoc->mapping_array_size ||
2671		    distance < 0) {
2672			/*
2673			 * Here we do NOT slide forward the array so that
2674			 * hopefully when more data comes in to fill it up
2675			 * we will be able to slide it forward. Really I
2676			 * don't think this should happen :-0
2677			 */
2678
2679			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2680				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2681				    (uint32_t) asoc->mapping_array_size,
2682				    SCTP_MAP_SLIDE_NONE);
2683			}
2684		} else {
2685			int ii;
2686
2687			for (ii = 0; ii < distance; ii++) {
2688				asoc->mapping_array[ii] =
2689				    asoc->mapping_array[slide_from + ii];
2690			}
2691			for (ii = distance; ii <= slide_end; ii++) {
2692				asoc->mapping_array[ii] = 0;
2693			}
2694			asoc->mapping_array_base_tsn += (slide_from << 3);
2695			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2696				sctp_log_map(asoc->mapping_array_base_tsn,
2697				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2698				    SCTP_MAP_SLIDE_RESULT);
2699			}
2700			/*
2701			 * EY if doing nr_sacks then slide the
2702			 * nr_mapping_array accordingly please
2703			 */
2704			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2705				for (ii = 0; ii < distance; ii++) {
2706					asoc->nr_mapping_array[ii] =
2707					    asoc->nr_mapping_array[slide_from + ii];
2708				}
2709				for (ii = distance; ii <= slide_end; ii++) {
2710					asoc->nr_mapping_array[ii] = 0;
2711				}
2712				asoc->nr_mapping_array_base_tsn += (slide_from << 3);
2713			}
2714		}
2715	}
2716	/*
2717	 * Now we need to see if we need to queue a sack or just start the
2718	 * timer (if allowed).
2719	 */
2720	if (ok_to_sack) {
2721		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2722			/*
2723			 * Ok special case, in SHUTDOWN-SENT case. here we
2724			 * maker sure SACK timer is off and instead send a
2725			 * SHUTDOWN and a SACK
2726			 */
2727			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2728				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2729				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2730			}
2731			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2732			/*
2733			 * EY if nr_sacks used then send an nr-sack , a sack
2734			 * otherwise
2735			 */
2736			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2737				sctp_send_nr_sack(stcb);
2738			else
2739				sctp_send_sack(stcb);
2740		} else {
2741			int is_a_gap;
2742
2743			/* is there a gap now ? */
2744			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2745			    stcb->asoc.cumulative_tsn, MAX_TSN);
2746
2747			/*
2748			 * CMT DAC algorithm: increase number of packets
2749			 * received since last ack
2750			 */
2751			stcb->asoc.cmt_dac_pkts_rcvd++;
2752
2753			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2754								 * SACK */
2755			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2756								 * longer is one */
2757			    (stcb->asoc.numduptsns) ||	/* we have dup's */
2758			    (is_a_gap) ||	/* is still a gap */
2759			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2760			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2761			    ) {
2762
2763				if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2764				    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2765				    (stcb->asoc.send_sack == 0) &&
2766				    (stcb->asoc.numduptsns == 0) &&
2767				    (stcb->asoc.delayed_ack) &&
2768				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2769
2770					/*
2771					 * CMT DAC algorithm: With CMT,
2772					 * delay acks even in the face of
2773					 *
2774					 * reordering. Therefore, if acks that
2775					 * do not have to be sent because of
2776					 * the above reasons, will be
2777					 * delayed. That is, acks that would
2778					 * have been sent due to gap reports
2779					 * will be delayed with DAC. Start
2780					 * the delayed ack timer.
2781					 */
2782					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2783					    stcb->sctp_ep, stcb, NULL);
2784				} else {
2785					/*
2786					 * Ok we must build a SACK since the
2787					 * timer is pending, we got our
2788					 * first packet OR there are gaps or
2789					 * duplicates.
2790					 */
2791					(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2792					/*
2793					 * EY if nr_sacks used then send an
2794					 * nr-sack , a sack otherwise
2795					 */
2796					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2797						sctp_send_nr_sack(stcb);
2798					else
2799						sctp_send_sack(stcb);
2800				}
2801			} else {
2802				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2803					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2804					    stcb->sctp_ep, stcb, NULL);
2805				}
2806			}
2807		}
2808	}
2809}
2810
2811void
2812sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2813{
2814	struct sctp_tmit_chunk *chk;
2815	uint32_t tsize;
2816	uint16_t nxt_todel;
2817
2818	if (asoc->fragmented_delivery_inprogress) {
2819		sctp_service_reassembly(stcb, asoc);
2820	}
2821	/* Can we proceed further, i.e. the PD-API is complete */
2822	if (asoc->fragmented_delivery_inprogress) {
2823		/* no */
2824		return;
2825	}
2826	/*
2827	 * Now is there some other chunk I can deliver from the reassembly
2828	 * queue.
2829	 */
2830doit_again:
2831	chk = TAILQ_FIRST(&asoc->reasmqueue);
2832	if (chk == NULL) {
2833		asoc->size_on_reasm_queue = 0;
2834		asoc->cnt_on_reasm_queue = 0;
2835		return;
2836	}
2837	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2838	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2839	    ((nxt_todel == chk->rec.data.stream_seq) ||
2840	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2841		/*
2842		 * Yep the first one is here. We setup to start reception,
2843		 * by backing down the TSN just in case we can't deliver.
2844		 */
2845
2846		/*
2847		 * Before we start though either all of the message should
2848		 * be here or 1/4 the socket buffer max or nothing on the
2849		 * delivery queue and something can be delivered.
2850		 */
2851		if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2852		    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
2853			asoc->fragmented_delivery_inprogress = 1;
2854			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2855			asoc->str_of_pdapi = chk->rec.data.stream_number;
2856			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2857			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2858			asoc->fragment_flags = chk->rec.data.rcv_flags;
2859			sctp_service_reassembly(stcb, asoc);
2860			if (asoc->fragmented_delivery_inprogress == 0) {
2861				goto doit_again;
2862			}
2863		}
2864	}
2865}
2866
2867int
2868sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2869    struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2870    struct sctp_nets *net, uint32_t * high_tsn)
2871{
2872	struct sctp_data_chunk *ch, chunk_buf;
2873	struct sctp_association *asoc;
2874	int num_chunks = 0;	/* number of control chunks processed */
2875	int stop_proc = 0;
2876	int chk_length, break_flag, last_chunk;
2877	int abort_flag = 0, was_a_gap = 0;
2878	struct mbuf *m;
2879
2880	/* set the rwnd */
2881	sctp_set_rwnd(stcb, &stcb->asoc);
2882
2883	m = *mm;
2884	SCTP_TCB_LOCK_ASSERT(stcb);
2885	asoc = &stcb->asoc;
2886	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2887	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2888		/* there was a gap before this data was processed */
2889		was_a_gap = 1;
2890	}
2891	/*
2892	 * setup where we got the last DATA packet from for any SACK that
2893	 * may need to go out. Don't bump the net. This is done ONLY when a
2894	 * chunk is assigned.
2895	 */
2896	asoc->last_data_chunk_from = net;
2897
2898	/*-
2899	 * Now before we proceed we must figure out if this is a wasted
2900	 * cluster... i.e. it is a small packet sent in and yet the driver
2901	 * underneath allocated a full cluster for it. If so we must copy it
2902	 * to a smaller mbuf and free up the cluster mbuf. This will help
2903	 * with cluster starvation. Note for __Panda__ we don't do this
2904	 * since it has clusters all the way down to 64 bytes.
2905	 */
2906	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2907		/* we only handle mbufs that are singletons.. not chains */
2908		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2909		if (m) {
2910			/* ok lets see if we can copy the data up */
2911			caddr_t *from, *to;
2912
2913			/* get the pointers and copy */
2914			to = mtod(m, caddr_t *);
2915			from = mtod((*mm), caddr_t *);
2916			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2917			/* copy the length and free up the old */
2918			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2919			sctp_m_freem(*mm);
2920			/* sucess, back copy */
2921			*mm = m;
2922		} else {
2923			/* We are in trouble in the mbuf world .. yikes */
2924			m = *mm;
2925		}
2926	}
2927	/* get pointer to the first chunk header */
2928	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2929	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2930	if (ch == NULL) {
2931		return (1);
2932	}
2933	/*
2934	 * process all DATA chunks...
2935	 */
2936	*high_tsn = asoc->cumulative_tsn;
2937	break_flag = 0;
2938	asoc->data_pkts_seen++;
2939	while (stop_proc == 0) {
2940		/* validate chunk length */
2941		chk_length = ntohs(ch->ch.chunk_length);
2942		if (length - *offset < chk_length) {
2943			/* all done, mutulated chunk */
2944			stop_proc = 1;
2945			break;
2946		}
2947		if (ch->ch.chunk_type == SCTP_DATA) {
2948			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2949				/*
2950				 * Need to send an abort since we had a
2951				 * invalid data chunk.
2952				 */
2953				struct mbuf *op_err;
2954
2955				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2956				    0, M_DONTWAIT, 1, MT_DATA);
2957
2958				if (op_err) {
2959					struct sctp_paramhdr *ph;
2960					uint32_t *ippp;
2961
2962					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2963					    (2 * sizeof(uint32_t));
2964					ph = mtod(op_err, struct sctp_paramhdr *);
2965					ph->param_type =
2966					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2967					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2968					ippp = (uint32_t *) (ph + 1);
2969					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2970					ippp++;
2971					*ippp = asoc->cumulative_tsn;
2972
2973				}
2974				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2975				sctp_abort_association(inp, stcb, m, iphlen, sh,
2976				    op_err, 0, net->port);
2977				return (2);
2978			}
2979#ifdef SCTP_AUDITING_ENABLED
2980			sctp_audit_log(0xB1, 0);
2981#endif
2982			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2983				last_chunk = 1;
2984			} else {
2985				last_chunk = 0;
2986			}
2987			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2988			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2989			    last_chunk)) {
2990				num_chunks++;
2991			}
2992			if (abort_flag)
2993				return (2);
2994
2995			if (break_flag) {
2996				/*
2997				 * Set because of out of rwnd space and no
2998				 * drop rep space left.
2999				 */
3000				stop_proc = 1;
3001				break;
3002			}
3003		} else {
3004			/* not a data chunk in the data region */
3005			switch (ch->ch.chunk_type) {
3006			case SCTP_INITIATION:
3007			case SCTP_INITIATION_ACK:
3008			case SCTP_SELECTIVE_ACK:
3009			case SCTP_NR_SELECTIVE_ACK:	/* EY */
3010			case SCTP_HEARTBEAT_REQUEST:
3011			case SCTP_HEARTBEAT_ACK:
3012			case SCTP_ABORT_ASSOCIATION:
3013			case SCTP_SHUTDOWN:
3014			case SCTP_SHUTDOWN_ACK:
3015			case SCTP_OPERATION_ERROR:
3016			case SCTP_COOKIE_ECHO:
3017			case SCTP_COOKIE_ACK:
3018			case SCTP_ECN_ECHO:
3019			case SCTP_ECN_CWR:
3020			case SCTP_SHUTDOWN_COMPLETE:
3021			case SCTP_AUTHENTICATION:
3022			case SCTP_ASCONF_ACK:
3023			case SCTP_PACKET_DROPPED:
3024			case SCTP_STREAM_RESET:
3025			case SCTP_FORWARD_CUM_TSN:
3026			case SCTP_ASCONF:
3027				/*
3028				 * Now, what do we do with KNOWN chunks that
3029				 * are NOT in the right place?
3030				 *
3031				 * For now, I do nothing but ignore them. We
3032				 * may later want to add sysctl stuff to
3033				 * switch out and do either an ABORT() or
3034				 * possibly process them.
3035				 */
3036				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3037					struct mbuf *op_err;
3038
3039					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3040					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3041					return (2);
3042				}
3043				break;
3044			default:
3045				/* unknown chunk type, use bit rules */
3046				if (ch->ch.chunk_type & 0x40) {
3047					/* Add a error report to the queue */
3048					struct mbuf *merr;
3049					struct sctp_paramhdr *phd;
3050
3051					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3052					if (merr) {
3053						phd = mtod(merr, struct sctp_paramhdr *);
3054						/*
3055						 * We cheat and use param
3056						 * type since we did not
3057						 * bother to define a error
3058						 * cause struct. They are
3059						 * the same basic format
3060						 * with different names.
3061						 */
3062						phd->param_type =
3063						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
3064						phd->param_length =
3065						    htons(chk_length + sizeof(*phd));
3066						SCTP_BUF_LEN(merr) = sizeof(*phd);
3067						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3068						    SCTP_SIZE32(chk_length),
3069						    M_DONTWAIT);
3070						if (SCTP_BUF_NEXT(merr)) {
3071							sctp_queue_op_err(stcb, merr);
3072						} else {
3073							sctp_m_freem(merr);
3074						}
3075					}
3076				}
3077				if ((ch->ch.chunk_type & 0x80) == 0) {
3078					/* discard the rest of this packet */
3079					stop_proc = 1;
3080				}	/* else skip this bad chunk and
3081					 * continue... */
3082				break;
3083			};	/* switch of chunk type */
3084		}
3085		*offset += SCTP_SIZE32(chk_length);
3086		if ((*offset >= length) || stop_proc) {
3087			/* no more data left in the mbuf chain */
3088			stop_proc = 1;
3089			continue;
3090		}
3091		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3092		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3093		if (ch == NULL) {
3094			*offset = length;
3095			stop_proc = 1;
3096			break;
3097
3098		}
3099	}			/* while */
3100	if (break_flag) {
3101		/*
3102		 * we need to report rwnd overrun drops.
3103		 */
3104		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3105	}
3106	if (num_chunks) {
3107		/*
3108		 * Did we get data, if so update the time for auto-close and
3109		 * give peer credit for being alive.
3110		 */
3111		SCTP_STAT_INCR(sctps_recvpktwithdata);
3112		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3113			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3114			    stcb->asoc.overall_error_count,
3115			    0,
3116			    SCTP_FROM_SCTP_INDATA,
3117			    __LINE__);
3118		}
3119		stcb->asoc.overall_error_count = 0;
3120		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3121	}
3122	/* now service all of the reassm queue if needed */
3123	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3124		sctp_service_queues(stcb, asoc);
3125
3126	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3127		/* Assure that we ack right away */
3128		stcb->asoc.send_sack = 1;
3129	}
3130	/* Start a sack timer or QUEUE a SACK for sending */
3131	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3132	    (stcb->asoc.mapping_array[0] != 0xff)) {
3133		if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3134		    (stcb->asoc.delayed_ack == 0) ||
3135		    (stcb->asoc.numduptsns) ||
3136		    (stcb->asoc.send_sack == 1)) {
3137			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3138				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3139			}
3140			/*
3141			 * EY if nr_sacks used then send an nr-sack , a sack
3142			 * otherwise
3143			 */
3144			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3145				sctp_send_nr_sack(stcb);
3146			else
3147				sctp_send_sack(stcb);
3148		} else {
3149			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3150				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3151				    stcb->sctp_ep, stcb, NULL);
3152			}
3153		}
3154	} else {
3155		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
3156	}
3157	if (abort_flag)
3158		return (2);
3159
3160	return (0);
3161}
3162
3163static void
3164sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3165    struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3166    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3167    int num_seg, int *ecn_seg_sums)
3168{
3169	/************************************************/
3170	/* process fragments and update sendqueue        */
3171	/************************************************/
3172	struct sctp_sack *sack;
3173	struct sctp_gap_ack_block *frag, block;
3174	struct sctp_tmit_chunk *tp1;
3175	int i, j;
3176	unsigned int theTSN;
3177	int num_frs = 0;
3178
3179	uint16_t frag_strt, frag_end, primary_flag_set;
3180	u_long last_frag_high;
3181
3182	/*
3183	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3184	 */
3185	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3186		primary_flag_set = 1;
3187	} else {
3188		primary_flag_set = 0;
3189	}
3190	sack = &ch->sack;
3191
3192	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3193	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3194	*offset += sizeof(block);
3195	if (frag == NULL) {
3196		return;
3197	}
3198	tp1 = NULL;
3199	last_frag_high = 0;
3200	for (i = 0; i < num_seg; i++) {
3201		frag_strt = ntohs(frag->start);
3202		frag_end = ntohs(frag->end);
3203		/* some sanity checks on the fragment offsets */
3204		if (frag_strt > frag_end) {
3205			/* this one is malformed, skip */
3206			frag++;
3207			continue;
3208		}
3209		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3210		    MAX_TSN))
3211			*biggest_tsn_acked = frag_end + last_tsn;
3212
3213		/* mark acked dgs and find out the highestTSN being acked */
3214		if (tp1 == NULL) {
3215			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3216
3217			/* save the locations of the last frags */
3218			last_frag_high = frag_end + last_tsn;
3219		} else {
3220			/*
3221			 * now lets see if we need to reset the queue due to
3222			 * a out-of-order SACK fragment
3223			 */
3224			if (compare_with_wrap(frag_strt + last_tsn,
3225			    last_frag_high, MAX_TSN)) {
3226				/*
3227				 * if the new frag starts after the last TSN
3228				 * frag covered, we are ok and this one is
3229				 * beyond the last one
3230				 */
3231				;
3232			} else {
3233				/*
3234				 * ok, they have reset us, so we need to
3235				 * reset the queue this will cause extra
3236				 * hunting but hey, they chose the
3237				 * performance hit when they failed to order
3238				 * their gaps
3239				 */
3240				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3241			}
3242			last_frag_high = frag_end + last_tsn;
3243		}
3244		for (j = frag_strt; j <= frag_end; j++) {
3245			theTSN = j + last_tsn;
3246			while (tp1) {
3247				if (tp1->rec.data.doing_fast_retransmit)
3248					num_frs++;
3249
3250				/*
3251				 * CMT: CUCv2 algorithm. For each TSN being
3252				 * processed from the sent queue, track the
3253				 * next expected pseudo-cumack, or
3254				 * rtx_pseudo_cumack, if required. Separate
3255				 * cumack trackers for first transmissions,
3256				 * and retransmissions.
3257				 */
3258				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3259				    (tp1->snd_count == 1)) {
3260					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3261					tp1->whoTo->find_pseudo_cumack = 0;
3262				}
3263				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3264				    (tp1->snd_count > 1)) {
3265					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3266					tp1->whoTo->find_rtx_pseudo_cumack = 0;
3267				}
3268				if (tp1->rec.data.TSN_seq == theTSN) {
3269					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3270						/*
3271						 * must be held until
3272						 * cum-ack passes
3273						 */
3274						/*
3275						 * ECN Nonce: Add the nonce
3276						 * value to the sender's
3277						 * nonce sum
3278						 */
3279						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3280							/*-
3281							 * If it is less than RESEND, it is
3282							 * now no-longer in flight.
3283							 * Higher values may already be set
3284							 * via previous Gap Ack Blocks...
3285							 * i.e. ACKED or RESEND.
3286							 */
3287							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3288							    *biggest_newly_acked_tsn, MAX_TSN)) {
3289								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3290							}
3291							/*
3292							 * CMT: SFR algo
3293							 * (and HTNA) - set
3294							 * saw_newack to 1
3295							 * for dest being
3296							 * newly acked.
3297							 * update
3298							 * this_sack_highest_
3299							 * newack if
3300							 * appropriate.
3301							 */
3302							if (tp1->rec.data.chunk_was_revoked == 0)
3303								tp1->whoTo->saw_newack = 1;
3304
3305							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3306							    tp1->whoTo->this_sack_highest_newack,
3307							    MAX_TSN)) {
3308								tp1->whoTo->this_sack_highest_newack =
3309								    tp1->rec.data.TSN_seq;
3310							}
3311							/*
3312							 * CMT DAC algo:
3313							 * also update
3314							 * this_sack_lowest_n
3315							 * ewack
3316							 */
3317							if (*this_sack_lowest_newack == 0) {
3318								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3319									sctp_log_sack(*this_sack_lowest_newack,
3320									    last_tsn,
3321									    tp1->rec.data.TSN_seq,
3322									    0,
3323									    0,
3324									    SCTP_LOG_TSN_ACKED);
3325								}
3326								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3327							}
3328							/*
3329							 * CMT: CUCv2
3330							 * algorithm. If
3331							 * (rtx-)pseudo-cumac
3332							 * k for corresp
3333							 * dest is being
3334							 * acked, then we
3335							 * have a new
3336							 * (rtx-)pseudo-cumac
3337							 * k. Set
3338							 * new_(rtx_)pseudo_c
3339							 * umack to TRUE so
3340							 * that the cwnd for
3341							 * this dest can be
3342							 * updated. Also
3343							 * trigger search
3344							 * for the next
3345							 * expected
3346							 * (rtx-)pseudo-cumac
3347							 * k. Separate
3348							 * pseudo_cumack
3349							 * trackers for
3350							 * first
3351							 * transmissions and
3352							 * retransmissions.
3353							 */
3354							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3355								if (tp1->rec.data.chunk_was_revoked == 0) {
3356									tp1->whoTo->new_pseudo_cumack = 1;
3357								}
3358								tp1->whoTo->find_pseudo_cumack = 1;
3359							}
3360							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3361								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3362							}
3363							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3364								if (tp1->rec.data.chunk_was_revoked == 0) {
3365									tp1->whoTo->new_pseudo_cumack = 1;
3366								}
3367								tp1->whoTo->find_rtx_pseudo_cumack = 1;
3368							}
3369							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3370								sctp_log_sack(*biggest_newly_acked_tsn,
3371								    last_tsn,
3372								    tp1->rec.data.TSN_seq,
3373								    frag_strt,
3374								    frag_end,
3375								    SCTP_LOG_TSN_ACKED);
3376							}
3377							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3378								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3379								    tp1->whoTo->flight_size,
3380								    tp1->book_size,
3381								    (uintptr_t) tp1->whoTo,
3382								    tp1->rec.data.TSN_seq);
3383							}
3384							sctp_flight_size_decrease(tp1);
3385							sctp_total_flight_decrease(stcb, tp1);
3386
3387							tp1->whoTo->net_ack += tp1->send_size;
3388							if (tp1->snd_count < 2) {
3389								/*
3390								 * True
3391								 * non-retran
3392								 * smited
3393								 * chunk */
3394								tp1->whoTo->net_ack2 += tp1->send_size;
3395
3396								/*
3397								 * update RTO
3398								 * too ? */
3399								if (tp1->do_rtt) {
3400									tp1->whoTo->RTO =
3401									    sctp_calculate_rto(stcb,
3402									    asoc,
3403									    tp1->whoTo,
3404									    &tp1->sent_rcv_time,
3405									    sctp_align_safe_nocopy);
3406									tp1->do_rtt = 0;
3407								}
3408							}
3409						}
3410						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3411							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3412							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3413							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3414							    asoc->this_sack_highest_gap,
3415							    MAX_TSN)) {
3416								asoc->this_sack_highest_gap =
3417								    tp1->rec.data.TSN_seq;
3418							}
3419							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3420								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3421#ifdef SCTP_AUDITING_ENABLED
3422								sctp_audit_log(0xB2,
3423								    (asoc->sent_queue_retran_cnt & 0x000000ff));
3424#endif
3425							}
3426						}
3427						/*
3428						 * All chunks NOT UNSENT
3429						 * fall through here and are
3430						 * marked (leave PR-SCTP
3431						 * ones that are to skip
3432						 * alone though)
3433						 */
3434						if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3435							tp1->sent = SCTP_DATAGRAM_MARKED;
3436
3437						if (tp1->rec.data.chunk_was_revoked) {
3438							/* deflate the cwnd */
3439							tp1->whoTo->cwnd -= tp1->book_size;
3440							tp1->rec.data.chunk_was_revoked = 0;
3441						}
3442					}
3443					break;
3444				}	/* if (tp1->TSN_seq == theTSN) */
3445				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3446				    MAX_TSN))
3447					break;
3448
3449				tp1 = TAILQ_NEXT(tp1, sctp_next);
3450			}	/* end while (tp1) */
3451		}		/* end for (j = fragStart */
3452		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3453		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3454		*offset += sizeof(block);
3455		if (frag == NULL) {
3456			break;
3457		}
3458	}
3459	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3460		if (num_frs)
3461			sctp_log_fr(*biggest_tsn_acked,
3462			    *biggest_newly_acked_tsn,
3463			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3464	}
3465}
3466
3467static void
3468sctp_check_for_revoked(struct sctp_tcb *stcb,
3469    struct sctp_association *asoc, uint32_t cumack,
3470    u_long biggest_tsn_acked)
3471{
3472	struct sctp_tmit_chunk *tp1;
3473	int tot_revoked = 0;
3474
3475	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3476	while (tp1) {
3477		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3478		    MAX_TSN)) {
3479			/*
3480			 * ok this guy is either ACK or MARKED. If it is
3481			 * ACKED it has been previously acked but not this
3482			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3483			 * again.
3484			 */
3485			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3486			    MAX_TSN))
3487				break;
3488
3489
3490			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3491				/* it has been revoked */
3492				tp1->sent = SCTP_DATAGRAM_SENT;
3493				tp1->rec.data.chunk_was_revoked = 1;
3494				/*
3495				 * We must add this stuff back in to assure
3496				 * timers and such get started.
3497				 */
3498				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3499					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3500					    tp1->whoTo->flight_size,
3501					    tp1->book_size,
3502					    (uintptr_t) tp1->whoTo,
3503					    tp1->rec.data.TSN_seq);
3504				}
3505				sctp_flight_size_increase(tp1);
3506				sctp_total_flight_increase(stcb, tp1);
3507				/*
3508				 * We inflate the cwnd to compensate for our
3509				 * artificial inflation of the flight_size.
3510				 */
3511				tp1->whoTo->cwnd += tp1->book_size;
3512				tot_revoked++;
3513				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3514					sctp_log_sack(asoc->last_acked_seq,
3515					    cumack,
3516					    tp1->rec.data.TSN_seq,
3517					    0,
3518					    0,
3519					    SCTP_LOG_TSN_REVOKED);
3520				}
3521			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3522				/* it has been re-acked in this SACK */
3523				tp1->sent = SCTP_DATAGRAM_ACKED;
3524			}
3525		}
3526		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3527			break;
3528		tp1 = TAILQ_NEXT(tp1, sctp_next);
3529	}
3530	if (tot_revoked > 0) {
3531		/*
3532		 * Setup the ecn nonce re-sync point. We do this since once
3533		 * data is revoked we begin to retransmit things, which do
3534		 * NOT have the ECN bits set. This means we are now out of
3535		 * sync and must wait until we get back in sync with the
3536		 * peer to check ECN bits.
3537		 */
3538		tp1 = TAILQ_FIRST(&asoc->send_queue);
3539		if (tp1 == NULL) {
3540			asoc->nonce_resync_tsn = asoc->sending_seq;
3541		} else {
3542			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3543		}
3544		asoc->nonce_wait_for_ecne = 0;
3545		asoc->nonce_sum_check = 0;
3546	}
3547}
3548
3549
3550static void
3551sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3552    u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3553{
3554	struct sctp_tmit_chunk *tp1;
3555	int strike_flag = 0;
3556	struct timeval now;
3557	int tot_retrans = 0;
3558	uint32_t sending_seq;
3559	struct sctp_nets *net;
3560	int num_dests_sacked = 0;
3561
3562	/*
3563	 * select the sending_seq, this is either the next thing ready to be
3564	 * sent but not transmitted, OR, the next seq we assign.
3565	 */
3566	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3567	if (tp1 == NULL) {
3568		sending_seq = asoc->sending_seq;
3569	} else {
3570		sending_seq = tp1->rec.data.TSN_seq;
3571	}
3572
3573	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3574	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3575		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3576			if (net->saw_newack)
3577				num_dests_sacked++;
3578		}
3579	}
3580	if (stcb->asoc.peer_supports_prsctp) {
3581		(void)SCTP_GETTIME_TIMEVAL(&now);
3582	}
3583	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3584	while (tp1) {
3585		strike_flag = 0;
3586		if (tp1->no_fr_allowed) {
3587			/* this one had a timeout or something */
3588			tp1 = TAILQ_NEXT(tp1, sctp_next);
3589			continue;
3590		}
3591		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3592			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3593				sctp_log_fr(biggest_tsn_newly_acked,
3594				    tp1->rec.data.TSN_seq,
3595				    tp1->sent,
3596				    SCTP_FR_LOG_CHECK_STRIKE);
3597		}
3598		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3599		    MAX_TSN) ||
3600		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3601			/* done */
3602			break;
3603		}
3604		if (stcb->asoc.peer_supports_prsctp) {
3605			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3606				/* Is it expired? */
3607				if (
3608				/*
3609				 * TODO sctp_constants.h needs alternative
3610				 * time macros when _KERNEL is undefined.
3611				 */
3612				    (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3613				    ) {
3614					/* Yes so drop it */
3615					if (tp1->data != NULL) {
3616						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3617						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3618						    SCTP_SO_NOT_LOCKED);
3619					}
3620					tp1 = TAILQ_NEXT(tp1, sctp_next);
3621					continue;
3622				}
3623			}
3624			if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3625				/* Has it been retransmitted tv_sec times? */
3626				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3627					/* Yes, so drop it */
3628					if (tp1->data != NULL) {
3629						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3630						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3631						    SCTP_SO_NOT_LOCKED);
3632					}
3633					tp1 = TAILQ_NEXT(tp1, sctp_next);
3634					continue;
3635				}
3636			}
3637		}
3638		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3639		    asoc->this_sack_highest_gap, MAX_TSN)) {
3640			/* we are beyond the tsn in the sack  */
3641			break;
3642		}
3643		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3644			/* either a RESEND, ACKED, or MARKED */
3645			/* skip */
3646			tp1 = TAILQ_NEXT(tp1, sctp_next);
3647			continue;
3648		}
3649		/*
3650		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3651		 */
3652		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3653			/*
3654			 * No new acks were receieved for data sent to this
3655			 * dest. Therefore, according to the SFR algo for
3656			 * CMT, no data sent to this dest can be marked for
3657			 * FR using this SACK.
3658			 */
3659			tp1 = TAILQ_NEXT(tp1, sctp_next);
3660			continue;
3661		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3662		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3663			/*
3664			 * CMT: New acks were receieved for data sent to
3665			 * this dest. But no new acks were seen for data
3666			 * sent after tp1. Therefore, according to the SFR
3667			 * algo for CMT, tp1 cannot be marked for FR using
3668			 * this SACK. This step covers part of the DAC algo
3669			 * and the HTNA algo as well.
3670			 */
3671			tp1 = TAILQ_NEXT(tp1, sctp_next);
3672			continue;
3673		}
3674		/*
3675		 * Here we check to see if we were have already done a FR
3676		 * and if so we see if the biggest TSN we saw in the sack is
3677		 * smaller than the recovery point. If so we don't strike
3678		 * the tsn... otherwise we CAN strike the TSN.
3679		 */
3680		/*
3681		 * @@@ JRI: Check for CMT if (accum_moved &&
3682		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3683		 * 0)) {
3684		 */
3685		if (accum_moved && asoc->fast_retran_loss_recovery) {
3686			/*
3687			 * Strike the TSN if in fast-recovery and cum-ack
3688			 * moved.
3689			 */
3690			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3691				sctp_log_fr(biggest_tsn_newly_acked,
3692				    tp1->rec.data.TSN_seq,
3693				    tp1->sent,
3694				    SCTP_FR_LOG_STRIKE_CHUNK);
3695			}
3696			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3697				tp1->sent++;
3698			}
3699			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3700				/*
3701				 * CMT DAC algorithm: If SACK flag is set to
3702				 * 0, then lowest_newack test will not pass
3703				 * because it would have been set to the
3704				 * cumack earlier. If not already to be
3705				 * rtx'd, If not a mixed sack and if tp1 is
3706				 * not between two sacked TSNs, then mark by
3707				 * one more. NOTE that we are marking by one
3708				 * additional time since the SACK DAC flag
3709				 * indicates that two packets have been
3710				 * received after this missing TSN.
3711				 */
3712				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3713				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3714					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3715						sctp_log_fr(16 + num_dests_sacked,
3716						    tp1->rec.data.TSN_seq,
3717						    tp1->sent,
3718						    SCTP_FR_LOG_STRIKE_CHUNK);
3719					}
3720					tp1->sent++;
3721				}
3722			}
3723		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3724			/*
3725			 * For those that have done a FR we must take
3726			 * special consideration if we strike. I.e the
3727			 * biggest_newly_acked must be higher than the
3728			 * sending_seq at the time we did the FR.
3729			 */
3730			if (
3731#ifdef SCTP_FR_TO_ALTERNATE
3732			/*
3733			 * If FR's go to new networks, then we must only do
3734			 * this for singly homed asoc's. However if the FR's
3735			 * go to the same network (Armando's work) then its
3736			 * ok to FR multiple times.
3737			 */
3738			    (asoc->numnets < 2)
3739#else
3740			    (1)
3741#endif
3742			    ) {
3743
3744				if ((compare_with_wrap(biggest_tsn_newly_acked,
3745				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3746				    (biggest_tsn_newly_acked ==
3747				    tp1->rec.data.fast_retran_tsn)) {
3748					/*
3749					 * Strike the TSN, since this ack is
3750					 * beyond where things were when we
3751					 * did a FR.
3752					 */
3753					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3754						sctp_log_fr(biggest_tsn_newly_acked,
3755						    tp1->rec.data.TSN_seq,
3756						    tp1->sent,
3757						    SCTP_FR_LOG_STRIKE_CHUNK);
3758					}
3759					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3760						tp1->sent++;
3761					}
3762					strike_flag = 1;
3763					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3764						/*
3765						 * CMT DAC algorithm: If
3766						 * SACK flag is set to 0,
3767						 * then lowest_newack test
3768						 * will not pass because it
3769						 * would have been set to
3770						 * the cumack earlier. If
3771						 * not already to be rtx'd,
3772						 * If not a mixed sack and
3773						 * if tp1 is not between two
3774						 * sacked TSNs, then mark by
3775						 * one more. NOTE that we
3776						 * are marking by one
3777						 * additional time since the
3778						 * SACK DAC flag indicates
3779						 * that two packets have
3780						 * been received after this
3781						 * missing TSN.
3782						 */
3783						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3784						    (num_dests_sacked == 1) &&
3785						    compare_with_wrap(this_sack_lowest_newack,
3786						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3787							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3788								sctp_log_fr(32 + num_dests_sacked,
3789								    tp1->rec.data.TSN_seq,
3790								    tp1->sent,
3791								    SCTP_FR_LOG_STRIKE_CHUNK);
3792							}
3793							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3794								tp1->sent++;
3795							}
3796						}
3797					}
3798				}
3799			}
3800			/*
3801			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3802			 * algo covers HTNA.
3803			 */
3804		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3805		    biggest_tsn_newly_acked, MAX_TSN)) {
3806			/*
3807			 * We don't strike these: This is the  HTNA
3808			 * algorithm i.e. we don't strike If our TSN is
3809			 * larger than the Highest TSN Newly Acked.
3810			 */
3811			;
3812		} else {
3813			/* Strike the TSN */
3814			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3815				sctp_log_fr(biggest_tsn_newly_acked,
3816				    tp1->rec.data.TSN_seq,
3817				    tp1->sent,
3818				    SCTP_FR_LOG_STRIKE_CHUNK);
3819			}
3820			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3821				tp1->sent++;
3822			}
3823			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3824				/*
3825				 * CMT DAC algorithm: If SACK flag is set to
3826				 * 0, then lowest_newack test will not pass
3827				 * because it would have been set to the
3828				 * cumack earlier. If not already to be
3829				 * rtx'd, If not a mixed sack and if tp1 is
3830				 * not between two sacked TSNs, then mark by
3831				 * one more. NOTE that we are marking by one
3832				 * additional time since the SACK DAC flag
3833				 * indicates that two packets have been
3834				 * received after this missing TSN.
3835				 */
3836				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3837				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3838					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3839						sctp_log_fr(48 + num_dests_sacked,
3840						    tp1->rec.data.TSN_seq,
3841						    tp1->sent,
3842						    SCTP_FR_LOG_STRIKE_CHUNK);
3843					}
3844					tp1->sent++;
3845				}
3846			}
3847		}
3848		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3849			/* Increment the count to resend */
3850			struct sctp_nets *alt;
3851
3852			/* printf("OK, we are now ready to FR this guy\n"); */
3853			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3854				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3855				    0, SCTP_FR_MARKED);
3856			}
3857			if (strike_flag) {
3858				/* This is a subsequent FR */
3859				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3860			}
3861			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3862			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3863				/*
3864				 * CMT: Using RTX_SSTHRESH policy for CMT.
3865				 * If CMT is being used, then pick dest with
3866				 * largest ssthresh for any retransmission.
3867				 */
3868				tp1->no_fr_allowed = 1;
3869				alt = tp1->whoTo;
3870				/* sa_ignore NO_NULL_CHK */
3871				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3872					/*
3873					 * JRS 5/18/07 - If CMT PF is on,
3874					 * use the PF version of
3875					 * find_alt_net()
3876					 */
3877					alt = sctp_find_alternate_net(stcb, alt, 2);
3878				} else {
3879					/*
3880					 * JRS 5/18/07 - If only CMT is on,
3881					 * use the CMT version of
3882					 * find_alt_net()
3883					 */
3884					/* sa_ignore NO_NULL_CHK */
3885					alt = sctp_find_alternate_net(stcb, alt, 1);
3886				}
3887				if (alt == NULL) {
3888					alt = tp1->whoTo;
3889				}
3890				/*
3891				 * CUCv2: If a different dest is picked for
3892				 * the retransmission, then new
3893				 * (rtx-)pseudo_cumack needs to be tracked
3894				 * for orig dest. Let CUCv2 track new (rtx-)
3895				 * pseudo-cumack always.
3896				 */
3897				if (tp1->whoTo) {
3898					tp1->whoTo->find_pseudo_cumack = 1;
3899					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3900				}
3901			} else {/* CMT is OFF */
3902
3903#ifdef SCTP_FR_TO_ALTERNATE
3904				/* Can we find an alternate? */
3905				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3906#else
3907				/*
3908				 * default behavior is to NOT retransmit
3909				 * FR's to an alternate. Armando Caro's
3910				 * paper details why.
3911				 */
3912				alt = tp1->whoTo;
3913#endif
3914			}
3915
3916			tp1->rec.data.doing_fast_retransmit = 1;
3917			tot_retrans++;
3918			/* mark the sending seq for possible subsequent FR's */
3919			/*
3920			 * printf("Marking TSN for FR new value %x\n",
3921			 * (uint32_t)tpi->rec.data.TSN_seq);
3922			 */
3923			if (TAILQ_EMPTY(&asoc->send_queue)) {
3924				/*
3925				 * If the queue of send is empty then its
3926				 * the next sequence number that will be
3927				 * assigned so we subtract one from this to
3928				 * get the one we last sent.
3929				 */
3930				tp1->rec.data.fast_retran_tsn = sending_seq;
3931			} else {
3932				/*
3933				 * If there are chunks on the send queue
3934				 * (unsent data that has made it from the
3935				 * stream queues but not out the door, we
3936				 * take the first one (which will have the
3937				 * lowest TSN) and subtract one to get the
3938				 * one we last sent.
3939				 */
3940				struct sctp_tmit_chunk *ttt;
3941
3942				ttt = TAILQ_FIRST(&asoc->send_queue);
3943				tp1->rec.data.fast_retran_tsn =
3944				    ttt->rec.data.TSN_seq;
3945			}
3946
3947			if (tp1->do_rtt) {
3948				/*
3949				 * this guy had a RTO calculation pending on
3950				 * it, cancel it
3951				 */
3952				tp1->do_rtt = 0;
3953			}
3954			/* fix counts and things */
3955			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3956				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3957				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3958				    tp1->book_size,
3959				    (uintptr_t) tp1->whoTo,
3960				    tp1->rec.data.TSN_seq);
3961			}
3962			if (tp1->whoTo) {
3963				tp1->whoTo->net_ack++;
3964				sctp_flight_size_decrease(tp1);
3965			}
3966			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3967				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3968				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3969			}
3970			/* add back to the rwnd */
3971			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3972
3973			/* remove from the total flight */
3974			sctp_total_flight_decrease(stcb, tp1);
3975			if (alt != tp1->whoTo) {
3976				/* yes, there is an alternate. */
3977				sctp_free_remote_addr(tp1->whoTo);
3978				/* sa_ignore FREED_MEMORY */
3979				tp1->whoTo = alt;
3980				atomic_add_int(&alt->ref_count, 1);
3981			}
3982		}
3983		tp1 = TAILQ_NEXT(tp1, sctp_next);
3984	}			/* while (tp1) */
3985
3986	if (tot_retrans > 0) {
3987		/*
3988		 * Setup the ecn nonce re-sync point. We do this since once
3989		 * we go to FR something we introduce a Karn's rule scenario
3990		 * and won't know the totals for the ECN bits.
3991		 */
3992		asoc->nonce_resync_tsn = sending_seq;
3993		asoc->nonce_wait_for_ecne = 0;
3994		asoc->nonce_sum_check = 0;
3995	}
3996}
3997
3998struct sctp_tmit_chunk *
3999sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4000    struct sctp_association *asoc)
4001{
4002	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
4003	struct timeval now;
4004	int now_filled = 0;
4005
4006	if (asoc->peer_supports_prsctp == 0) {
4007		return (NULL);
4008	}
4009	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4010	while (tp1) {
4011		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4012		    tp1->sent != SCTP_DATAGRAM_RESEND) {
4013			/* no chance to advance, out of here */
4014			break;
4015		}
4016		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4017			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4018				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4019				    asoc->advanced_peer_ack_point,
4020				    tp1->rec.data.TSN_seq, 0, 0);
4021			}
4022		}
4023		if (!PR_SCTP_ENABLED(tp1->flags)) {
4024			/*
4025			 * We can't fwd-tsn past any that are reliable aka
4026			 * retransmitted until the asoc fails.
4027			 */
4028			break;
4029		}
4030		if (!now_filled) {
4031			(void)SCTP_GETTIME_TIMEVAL(&now);
4032			now_filled = 1;
4033		}
4034		tp2 = TAILQ_NEXT(tp1, sctp_next);
4035		/*
4036		 * now we got a chunk which is marked for another
4037		 * retransmission to a PR-stream but has run out its chances
4038		 * already maybe OR has been marked to skip now. Can we skip
4039		 * it if its a resend?
4040		 */
4041		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4042		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4043			/*
4044			 * Now is this one marked for resend and its time is
4045			 * now up?
4046			 */
4047			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4048				/* Yes so drop it */
4049				if (tp1->data) {
4050					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
4051					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4052					    SCTP_SO_NOT_LOCKED);
4053				}
4054			} else {
4055				/*
4056				 * No, we are done when hit one for resend
4057				 * whos time as not expired.
4058				 */
4059				break;
4060			}
4061		}
4062		/*
4063		 * Ok now if this chunk is marked to drop it we can clean up
4064		 * the chunk, advance our peer ack point and we can check
4065		 * the next chunk.
4066		 */
4067		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4068			/* advance PeerAckPoint goes forward */
4069			if (compare_with_wrap(tp1->rec.data.TSN_seq,
4070			    asoc->advanced_peer_ack_point,
4071			    MAX_TSN)) {
4072
4073				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4074				a_adv = tp1;
4075			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
4076				/* No update but we do save the chk */
4077				a_adv = tp1;
4078			}
4079		} else {
4080			/*
4081			 * If it is still in RESEND we can advance no
4082			 * further
4083			 */
4084			break;
4085		}
4086		/*
4087		 * If we hit here we just dumped tp1, move to next tsn on
4088		 * sent queue.
4089		 */
4090		tp1 = tp2;
4091	}
4092	return (a_adv);
4093}
4094
4095static int
4096sctp_fs_audit(struct sctp_association *asoc)
4097{
4098	struct sctp_tmit_chunk *chk;
4099	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4100	int entry_flight, entry_cnt, ret;
4101
4102	entry_flight = asoc->total_flight;
4103	entry_cnt = asoc->total_flight_count;
4104	ret = 0;
4105
4106	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
4107		return (0);
4108
4109	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4110		if (chk->sent < SCTP_DATAGRAM_RESEND) {
4111			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
4112			    chk->rec.data.TSN_seq,
4113			    chk->send_size,
4114			    chk->snd_count
4115			    );
4116			inflight++;
4117		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4118			resend++;
4119		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4120			inbetween++;
4121		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4122			above++;
4123		} else {
4124			acked++;
4125		}
4126	}
4127
4128	if ((inflight > 0) || (inbetween > 0)) {
4129#ifdef INVARIANTS
4130		panic("Flight size-express incorrect? \n");
4131#else
4132		printf("asoc->total_flight:%d cnt:%d\n",
4133		    entry_flight, entry_cnt);
4134
4135		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
4136		    inflight, inbetween, resend, above, acked);
4137		ret = 1;
4138#endif
4139	}
4140	return (ret);
4141}
4142
4143
4144static void
4145sctp_window_probe_recovery(struct sctp_tcb *stcb,
4146    struct sctp_association *asoc,
4147    struct sctp_nets *net,
4148    struct sctp_tmit_chunk *tp1)
4149{
4150	tp1->window_probe = 0;
4151	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
4152		/* TSN's skipped we do NOT move back. */
4153		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
4154		    tp1->whoTo->flight_size,
4155		    tp1->book_size,
4156		    (uintptr_t) tp1->whoTo,
4157		    tp1->rec.data.TSN_seq);
4158		return;
4159	}
4160	/* First setup this by shrinking flight */
4161	sctp_flight_size_decrease(tp1);
4162	sctp_total_flight_decrease(stcb, tp1);
4163	/* Now mark for resend */
4164	tp1->sent = SCTP_DATAGRAM_RESEND;
4165	asoc->sent_queue_retran_cnt++;
4166	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4167		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4168		    tp1->whoTo->flight_size,
4169		    tp1->book_size,
4170		    (uintptr_t) tp1->whoTo,
4171		    tp1->rec.data.TSN_seq);
4172	}
4173}
4174
4175void
4176sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4177    uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4178{
4179	struct sctp_nets *net;
4180	struct sctp_association *asoc;
4181	struct sctp_tmit_chunk *tp1, *tp2;
4182	uint32_t old_rwnd;
4183	int win_probe_recovery = 0;
4184	int win_probe_recovered = 0;
4185	int j, done_once = 0;
4186
4187	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4188		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4189		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4190	}
4191	SCTP_TCB_LOCK_ASSERT(stcb);
4192#ifdef SCTP_ASOCLOG_OF_TSNS
4193	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4194	stcb->asoc.cumack_log_at++;
4195	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4196		stcb->asoc.cumack_log_at = 0;
4197	}
4198#endif
4199	asoc = &stcb->asoc;
4200	old_rwnd = asoc->peers_rwnd;
4201	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4202		/* old ack */
4203		return;
4204	} else if (asoc->last_acked_seq == cumack) {
4205		/* Window update sack */
4206		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4207		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4208		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4209			/* SWS sender side engages */
4210			asoc->peers_rwnd = 0;
4211		}
4212		if (asoc->peers_rwnd > old_rwnd) {
4213			goto again;
4214		}
4215		return;
4216	}
4217	/* First setup for CC stuff */
4218	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4219		net->prev_cwnd = net->cwnd;
4220		net->net_ack = 0;
4221		net->net_ack2 = 0;
4222
4223		/*
4224		 * CMT: Reset CUC and Fast recovery algo variables before
4225		 * SACK processing
4226		 */
4227		net->new_pseudo_cumack = 0;
4228		net->will_exit_fast_recovery = 0;
4229	}
4230	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4231		uint32_t send_s;
4232
4233		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4234			tp1 = TAILQ_LAST(&asoc->sent_queue,
4235			    sctpchunk_listhead);
4236			send_s = tp1->rec.data.TSN_seq + 1;
4237		} else {
4238			send_s = asoc->sending_seq;
4239		}
4240		if ((cumack == send_s) ||
4241		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
4242#ifndef INVARIANTS
4243			struct mbuf *oper;
4244
4245#endif
4246#ifdef INVARIANTS
4247			panic("Impossible sack 1");
4248#else
4249			*abort_now = 1;
4250			/* XXX */
4251			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4252			    0, M_DONTWAIT, 1, MT_DATA);
4253			if (oper) {
4254				struct sctp_paramhdr *ph;
4255				uint32_t *ippp;
4256
4257				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4258				    sizeof(uint32_t);
4259				ph = mtod(oper, struct sctp_paramhdr *);
4260				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4261				ph->param_length = htons(SCTP_BUF_LEN(oper));
4262				ippp = (uint32_t *) (ph + 1);
4263				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4264			}
4265			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4266			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4267			return;
4268#endif
4269		}
4270	}
4271	asoc->this_sack_highest_gap = cumack;
4272	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4273		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4274		    stcb->asoc.overall_error_count,
4275		    0,
4276		    SCTP_FROM_SCTP_INDATA,
4277		    __LINE__);
4278	}
4279	stcb->asoc.overall_error_count = 0;
4280	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4281		/* process the new consecutive TSN first */
4282		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4283		while (tp1) {
4284			tp2 = TAILQ_NEXT(tp1, sctp_next);
4285			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4286			    MAX_TSN) ||
4287			    cumack == tp1->rec.data.TSN_seq) {
4288				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4289					printf("Warning, an unsent is now acked?\n");
4290				}
4291				/*
4292				 * ECN Nonce: Add the nonce to the sender's
4293				 * nonce sum
4294				 */
4295				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4296				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4297					/*
4298					 * If it is less than ACKED, it is
4299					 * now no-longer in flight. Higher
4300					 * values may occur during marking
4301					 */
4302					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4303						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4304							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4305							    tp1->whoTo->flight_size,
4306							    tp1->book_size,
4307							    (uintptr_t) tp1->whoTo,
4308							    tp1->rec.data.TSN_seq);
4309						}
4310						sctp_flight_size_decrease(tp1);
4311						/* sa_ignore NO_NULL_CHK */
4312						sctp_total_flight_decrease(stcb, tp1);
4313					}
4314					tp1->whoTo->net_ack += tp1->send_size;
4315					if (tp1->snd_count < 2) {
4316						/*
4317						 * True non-retransmited
4318						 * chunk
4319						 */
4320						tp1->whoTo->net_ack2 +=
4321						    tp1->send_size;
4322
4323						/* update RTO too? */
4324						if (tp1->do_rtt) {
4325							tp1->whoTo->RTO =
4326							/*
4327							 * sa_ignore
4328							 * NO_NULL_CHK
4329							 */
4330							    sctp_calculate_rto(stcb,
4331							    asoc, tp1->whoTo,
4332							    &tp1->sent_rcv_time,
4333							    sctp_align_safe_nocopy);
4334							tp1->do_rtt = 0;
4335						}
4336					}
4337					/*
4338					 * CMT: CUCv2 algorithm. From the
4339					 * cumack'd TSNs, for each TSN being
4340					 * acked for the first time, set the
4341					 * following variables for the
4342					 * corresp destination.
4343					 * new_pseudo_cumack will trigger a
4344					 * cwnd update.
4345					 * find_(rtx_)pseudo_cumack will
4346					 * trigger search for the next
4347					 * expected (rtx-)pseudo-cumack.
4348					 */
4349					tp1->whoTo->new_pseudo_cumack = 1;
4350					tp1->whoTo->find_pseudo_cumack = 1;
4351					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4352
4353					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4354						/* sa_ignore NO_NULL_CHK */
4355						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4356					}
4357				}
4358				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4359					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4360				}
4361				if (tp1->rec.data.chunk_was_revoked) {
4362					/* deflate the cwnd */
4363					tp1->whoTo->cwnd -= tp1->book_size;
4364					tp1->rec.data.chunk_was_revoked = 0;
4365				}
4366				tp1->sent = SCTP_DATAGRAM_ACKED;
4367				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4368				if (tp1->data) {
4369					/* sa_ignore NO_NULL_CHK */
4370					sctp_free_bufspace(stcb, asoc, tp1, 1);
4371					sctp_m_freem(tp1->data);
4372				}
4373				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4374					sctp_log_sack(asoc->last_acked_seq,
4375					    cumack,
4376					    tp1->rec.data.TSN_seq,
4377					    0,
4378					    0,
4379					    SCTP_LOG_FREE_SENT);
4380				}
4381				tp1->data = NULL;
4382				asoc->sent_queue_cnt--;
4383				sctp_free_a_chunk(stcb, tp1);
4384				tp1 = tp2;
4385			} else {
4386				break;
4387			}
4388		}
4389
4390	}
4391	/* sa_ignore NO_NULL_CHK */
4392	if (stcb->sctp_socket) {
4393#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4394		struct socket *so;
4395
4396#endif
4397
4398		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4399		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4400			/* sa_ignore NO_NULL_CHK */
4401			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4402		}
4403#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4404		so = SCTP_INP_SO(stcb->sctp_ep);
4405		atomic_add_int(&stcb->asoc.refcnt, 1);
4406		SCTP_TCB_UNLOCK(stcb);
4407		SCTP_SOCKET_LOCK(so, 1);
4408		SCTP_TCB_LOCK(stcb);
4409		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4410		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4411			/* assoc was freed while we were unlocked */
4412			SCTP_SOCKET_UNLOCK(so, 1);
4413			return;
4414		}
4415#endif
4416		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4417#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4418		SCTP_SOCKET_UNLOCK(so, 1);
4419#endif
4420	} else {
4421		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4422			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4423		}
4424	}
4425
4426	/* JRS - Use the congestion control given in the CC module */
4427	if (asoc->last_acked_seq != cumack)
4428		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4429
4430	asoc->last_acked_seq = cumack;
4431
4432	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4433		/* nothing left in-flight */
4434		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4435			net->flight_size = 0;
4436			net->partial_bytes_acked = 0;
4437		}
4438		asoc->total_flight = 0;
4439		asoc->total_flight_count = 0;
4440	}
4441	/* ECN Nonce updates */
4442	if (asoc->ecn_nonce_allowed) {
4443		if (asoc->nonce_sum_check) {
4444			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4445				if (asoc->nonce_wait_for_ecne == 0) {
4446					struct sctp_tmit_chunk *lchk;
4447
4448					lchk = TAILQ_FIRST(&asoc->send_queue);
4449					asoc->nonce_wait_for_ecne = 1;
4450					if (lchk) {
4451						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4452					} else {
4453						asoc->nonce_wait_tsn = asoc->sending_seq;
4454					}
4455				} else {
4456					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4457					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4458						/*
4459						 * Misbehaving peer. We need
4460						 * to react to this guy
4461						 */
4462						asoc->ecn_allowed = 0;
4463						asoc->ecn_nonce_allowed = 0;
4464					}
4465				}
4466			}
4467		} else {
4468			/* See if Resynchronization Possible */
4469			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4470				asoc->nonce_sum_check = 1;
4471				/*
4472				 * now we must calculate what the base is.
4473				 * We do this based on two things, we know
4474				 * the total's for all the segments
4475				 * gap-acked in the SACK (none), We also
4476				 * know the SACK's nonce sum, its in
4477				 * nonce_sum_flag. So we can build a truth
4478				 * table to back-calculate the new value of
4479				 * asoc->nonce_sum_expect_base:
4480				 *
4481				 * SACK-flag-Value         Seg-Sums Base 0 0 0
4482				 * 1                    0 1 0 1 1 1
4483				 * 1 0
4484				 */
4485				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4486			}
4487		}
4488	}
4489	/* RWND update */
4490	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4491	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4492	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4493		/* SWS sender side engages */
4494		asoc->peers_rwnd = 0;
4495	}
4496	if (asoc->peers_rwnd > old_rwnd) {
4497		win_probe_recovery = 1;
4498	}
4499	/* Now assure a timer where data is queued at */
4500again:
4501	j = 0;
4502	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4503		int to_ticks;
4504
4505		if (win_probe_recovery && (net->window_probe)) {
4506			win_probe_recovered = 1;
4507			/*
4508			 * Find first chunk that was used with window probe
4509			 * and clear the sent
4510			 */
4511			/* sa_ignore FREED_MEMORY */
4512			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4513				if (tp1->window_probe) {
4514					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4515					break;
4516				}
4517			}
4518		}
4519		if (net->RTO == 0) {
4520			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4521		} else {
4522			to_ticks = MSEC_TO_TICKS(net->RTO);
4523		}
4524		if (net->flight_size) {
4525			j++;
4526			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4527			    sctp_timeout_handler, &net->rxt_timer);
4528			if (net->window_probe) {
4529				net->window_probe = 0;
4530			}
4531		} else {
4532			if (net->window_probe) {
4533				/*
4534				 * In window probes we must assure a timer
4535				 * is still running there
4536				 */
4537				net->window_probe = 0;
4538				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4539					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4540					    sctp_timeout_handler, &net->rxt_timer);
4541				}
4542			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4543				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4544				    stcb, net,
4545				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4546			}
4547			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4548				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4549					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4550					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4551					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4552				}
4553			}
4554		}
4555	}
4556	if ((j == 0) &&
4557	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4558	    (asoc->sent_queue_retran_cnt == 0) &&
4559	    (win_probe_recovered == 0) &&
4560	    (done_once == 0)) {
4561		/*
4562		 * huh, this should not happen unless all packets are
4563		 * PR-SCTP and marked to skip of course.
4564		 */
4565		if (sctp_fs_audit(asoc)) {
4566			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4567				if (net->flight_size) {
4568					net->flight_size = 0;
4569				}
4570			}
4571			asoc->total_flight = 0;
4572			asoc->total_flight_count = 0;
4573			asoc->sent_queue_retran_cnt = 0;
4574			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4575				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4576					sctp_flight_size_increase(tp1);
4577					sctp_total_flight_increase(stcb, tp1);
4578				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4579					asoc->sent_queue_retran_cnt++;
4580				}
4581			}
4582		}
4583		done_once = 1;
4584		goto again;
4585	}
4586	/**********************************/
4587	/* Now what about shutdown issues */
4588	/**********************************/
4589	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4590		/* nothing left on sendqueue.. consider done */
4591		/* clean up */
4592		if ((asoc->stream_queue_cnt == 1) &&
4593		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4594		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4595		    (asoc->locked_on_sending)
4596		    ) {
4597			struct sctp_stream_queue_pending *sp;
4598
4599			/*
4600			 * I may be in a state where we got all across.. but
4601			 * cannot write more due to a shutdown... we abort
4602			 * since the user did not indicate EOR in this case.
4603			 * The sp will be cleaned during free of the asoc.
4604			 */
4605			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4606			    sctp_streamhead);
4607			if ((sp) && (sp->length == 0)) {
4608				/* Let cleanup code purge it */
4609				if (sp->msg_is_complete) {
4610					asoc->stream_queue_cnt--;
4611				} else {
4612					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4613					asoc->locked_on_sending = NULL;
4614					asoc->stream_queue_cnt--;
4615				}
4616			}
4617		}
4618		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4619		    (asoc->stream_queue_cnt == 0)) {
4620			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4621				/* Need to abort here */
4622				struct mbuf *oper;
4623
4624		abort_out_now:
4625				*abort_now = 1;
4626				/* XXX */
4627				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4628				    0, M_DONTWAIT, 1, MT_DATA);
4629				if (oper) {
4630					struct sctp_paramhdr *ph;
4631					uint32_t *ippp;
4632
4633					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4634					    sizeof(uint32_t);
4635					ph = mtod(oper, struct sctp_paramhdr *);
4636					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4637					ph->param_length = htons(SCTP_BUF_LEN(oper));
4638					ippp = (uint32_t *) (ph + 1);
4639					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4640				}
4641				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4642				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4643			} else {
4644				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4645				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4646					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4647				}
4648				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4649				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4650				sctp_stop_timers_for_shutdown(stcb);
4651				sctp_send_shutdown(stcb,
4652				    stcb->asoc.primary_destination);
4653				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4654				    stcb->sctp_ep, stcb, asoc->primary_destination);
4655				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4656				    stcb->sctp_ep, stcb, asoc->primary_destination);
4657			}
4658		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4659		    (asoc->stream_queue_cnt == 0)) {
4660			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4661				goto abort_out_now;
4662			}
4663			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4664			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4665			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4666			sctp_send_shutdown_ack(stcb,
4667			    stcb->asoc.primary_destination);
4668
4669			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4670			    stcb->sctp_ep, stcb, asoc->primary_destination);
4671		}
4672	}
4673	/*********************************************/
4674	/* Here we perform PR-SCTP procedures        */
4675	/* (section 4.2)                             */
4676	/*********************************************/
4677	/* C1. update advancedPeerAckPoint */
4678	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4679		asoc->advanced_peer_ack_point = cumack;
4680	}
4681	/* PR-Sctp issues need to be addressed too */
4682	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4683		struct sctp_tmit_chunk *lchk;
4684		uint32_t old_adv_peer_ack_point;
4685
4686		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4687		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4688		/* C3. See if we need to send a Fwd-TSN */
4689		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4690		    MAX_TSN)) {
4691			/*
4692			 * ISSUE with ECN, see FWD-TSN processing for notes
4693			 * on issues that will occur when the ECN NONCE
4694			 * stuff is put into SCTP for cross checking.
4695			 */
4696			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4697			    MAX_TSN)) {
4698				send_forward_tsn(stcb, asoc);
4699				/*
4700				 * ECN Nonce: Disable Nonce Sum check when
4701				 * FWD TSN is sent and store resync tsn
4702				 */
4703				asoc->nonce_sum_check = 0;
4704				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4705			} else if (lchk) {
4706				/* try to FR fwd-tsn's that get lost too */
4707				lchk->rec.data.fwd_tsn_cnt++;
4708				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4709					send_forward_tsn(stcb, asoc);
4710					lchk->rec.data.fwd_tsn_cnt = 0;
4711				}
4712			}
4713		}
4714		if (lchk) {
4715			/* Assure a timer is up */
4716			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4717			    stcb->sctp_ep, stcb, lchk->whoTo);
4718		}
4719	}
4720	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4721		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4722		    rwnd,
4723		    stcb->asoc.peers_rwnd,
4724		    stcb->asoc.total_flight,
4725		    stcb->asoc.total_output_queue_size);
4726	}
4727}
4728
4729void
4730sctp_handle_sack(struct mbuf *m, int offset,
4731    struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4732    struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4733{
4734	struct sctp_association *asoc;
4735	struct sctp_sack *sack;
4736	struct sctp_tmit_chunk *tp1, *tp2;
4737	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4738	         this_sack_lowest_newack;
4739	uint32_t sav_cum_ack;
4740	uint16_t num_seg, num_dup;
4741	uint16_t wake_him = 0;
4742	unsigned int sack_length;
4743	uint32_t send_s = 0;
4744	long j;
4745	int accum_moved = 0;
4746	int will_exit_fast_recovery = 0;
4747	uint32_t a_rwnd, old_rwnd;
4748	int win_probe_recovery = 0;
4749	int win_probe_recovered = 0;
4750	struct sctp_nets *net = NULL;
4751	int nonce_sum_flag, ecn_seg_sums = 0;
4752	int done_once;
4753	uint8_t reneged_all = 0;
4754	uint8_t cmt_dac_flag;
4755
4756	/*
4757	 * we take any chance we can to service our queues since we cannot
4758	 * get awoken when the socket is read from :<
4759	 */
4760	/*
4761	 * Now perform the actual SACK handling: 1) Verify that it is not an
4762	 * old sack, if so discard. 2) If there is nothing left in the send
4763	 * queue (cum-ack is equal to last acked) then you have a duplicate
4764	 * too, update any rwnd change and verify no timers are running.
4765	 * then return. 3) Process any new consequtive data i.e. cum-ack
4766	 * moved process these first and note that it moved. 4) Process any
4767	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4768	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4769	 * sync up flightsizes and things, stop all timers and also check
4770	 * for shutdown_pending state. If so then go ahead and send off the
4771	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4772	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4773	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4774	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4775	 * if in shutdown_recv state.
4776	 */
4777	SCTP_TCB_LOCK_ASSERT(stcb);
4778	sack = &ch->sack;
4779	/* CMT DAC algo */
4780	this_sack_lowest_newack = 0;
4781	j = 0;
4782	sack_length = (unsigned int)sack_len;
4783	/* ECN Nonce */
4784	SCTP_STAT_INCR(sctps_slowpath_sack);
4785	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4786	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4787#ifdef SCTP_ASOCLOG_OF_TSNS
4788	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4789	stcb->asoc.cumack_log_at++;
4790	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4791		stcb->asoc.cumack_log_at = 0;
4792	}
4793#endif
4794	num_seg = ntohs(sack->num_gap_ack_blks);
4795	a_rwnd = rwnd;
4796
4797	/* CMT DAC algo */
4798	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4799	num_dup = ntohs(sack->num_dup_tsns);
4800
4801	old_rwnd = stcb->asoc.peers_rwnd;
4802	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4803		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4804		    stcb->asoc.overall_error_count,
4805		    0,
4806		    SCTP_FROM_SCTP_INDATA,
4807		    __LINE__);
4808	}
4809	stcb->asoc.overall_error_count = 0;
4810	asoc = &stcb->asoc;
4811	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4812		sctp_log_sack(asoc->last_acked_seq,
4813		    cum_ack,
4814		    0,
4815		    num_seg,
4816		    num_dup,
4817		    SCTP_LOG_NEW_SACK);
4818	}
4819	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4820		int off_to_dup, iii;
4821		uint32_t *dupdata, dblock;
4822
4823		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4824		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4825			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4826			    sizeof(uint32_t), (uint8_t *) & dblock);
4827			off_to_dup += sizeof(uint32_t);
4828			if (dupdata) {
4829				for (iii = 0; iii < num_dup; iii++) {
4830					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4831					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4832					    sizeof(uint32_t), (uint8_t *) & dblock);
4833					if (dupdata == NULL)
4834						break;
4835					off_to_dup += sizeof(uint32_t);
4836				}
4837			}
4838		} else {
4839			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4840			    off_to_dup, num_dup, sack_length, num_seg);
4841		}
4842	}
4843	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4844		/* reality check */
4845		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4846			tp1 = TAILQ_LAST(&asoc->sent_queue,
4847			    sctpchunk_listhead);
4848			send_s = tp1->rec.data.TSN_seq + 1;
4849		} else {
4850			send_s = asoc->sending_seq;
4851		}
4852		if (cum_ack == send_s ||
4853		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4854#ifndef INVARIANTS
4855			struct mbuf *oper;
4856
4857#endif
4858#ifdef INVARIANTS
4859	hopeless_peer:
4860			panic("Impossible sack 1");
4861#else
4862
4863
4864			/*
4865			 * no way, we have not even sent this TSN out yet.
4866			 * Peer is hopelessly messed up with us.
4867			 */
4868	hopeless_peer:
4869			*abort_now = 1;
4870			/* XXX */
4871			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4872			    0, M_DONTWAIT, 1, MT_DATA);
4873			if (oper) {
4874				struct sctp_paramhdr *ph;
4875				uint32_t *ippp;
4876
4877				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4878				    sizeof(uint32_t);
4879				ph = mtod(oper, struct sctp_paramhdr *);
4880				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4881				ph->param_length = htons(SCTP_BUF_LEN(oper));
4882				ippp = (uint32_t *) (ph + 1);
4883				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4884			}
4885			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4886			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4887			return;
4888#endif
4889		}
4890	}
4891	/**********************/
4892	/* 1) check the range */
4893	/**********************/
4894	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4895		/* acking something behind */
4896		return;
4897	}
4898	sav_cum_ack = asoc->last_acked_seq;
4899
4900	/* update the Rwnd of the peer */
4901	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4902	    TAILQ_EMPTY(&asoc->send_queue) &&
4903	    (asoc->stream_queue_cnt == 0)
4904	    ) {
4905		/* nothing left on send/sent and strmq */
4906		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4907			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4908			    asoc->peers_rwnd, 0, 0, a_rwnd);
4909		}
4910		asoc->peers_rwnd = a_rwnd;
4911		if (asoc->sent_queue_retran_cnt) {
4912			asoc->sent_queue_retran_cnt = 0;
4913		}
4914		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4915			/* SWS sender side engages */
4916			asoc->peers_rwnd = 0;
4917		}
4918		/* stop any timers */
4919		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4920			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4921			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4922			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4923				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4924					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4925					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4926					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4927				}
4928			}
4929			net->partial_bytes_acked = 0;
4930			net->flight_size = 0;
4931		}
4932		asoc->total_flight = 0;
4933		asoc->total_flight_count = 0;
4934		return;
4935	}
4936	/*
4937	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4938	 * things. The total byte count acked is tracked in netAckSz AND
4939	 * netAck2 is used to track the total bytes acked that are un-
4940	 * amibguious and were never retransmitted. We track these on a per
4941	 * destination address basis.
4942	 */
4943	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4944		net->prev_cwnd = net->cwnd;
4945		net->net_ack = 0;
4946		net->net_ack2 = 0;
4947
4948		/*
4949		 * CMT: Reset CUC and Fast recovery algo variables before
4950		 * SACK processing
4951		 */
4952		net->new_pseudo_cumack = 0;
4953		net->will_exit_fast_recovery = 0;
4954	}
4955	/* process the new consecutive TSN first */
4956	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4957	while (tp1) {
4958		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4959		    MAX_TSN) ||
4960		    last_tsn == tp1->rec.data.TSN_seq) {
4961			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4962				/*
4963				 * ECN Nonce: Add the nonce to the sender's
4964				 * nonce sum
4965				 */
4966				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4967				accum_moved = 1;
4968				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4969					/*
4970					 * If it is less than ACKED, it is
4971					 * now no-longer in flight. Higher
4972					 * values may occur during marking
4973					 */
4974					if ((tp1->whoTo->dest_state &
4975					    SCTP_ADDR_UNCONFIRMED) &&
4976					    (tp1->snd_count < 2)) {
4977						/*
4978						 * If there was no retran
4979						 * and the address is
4980						 * un-confirmed and we sent
4981						 * there and are now
4982						 * sacked.. its confirmed,
4983						 * mark it so.
4984						 */
4985						tp1->whoTo->dest_state &=
4986						    ~SCTP_ADDR_UNCONFIRMED;
4987					}
4988					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4989						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4990							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4991							    tp1->whoTo->flight_size,
4992							    tp1->book_size,
4993							    (uintptr_t) tp1->whoTo,
4994							    tp1->rec.data.TSN_seq);
4995						}
4996						sctp_flight_size_decrease(tp1);
4997						sctp_total_flight_decrease(stcb, tp1);
4998					}
4999					tp1->whoTo->net_ack += tp1->send_size;
5000
5001					/* CMT SFR and DAC algos */
5002					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5003					tp1->whoTo->saw_newack = 1;
5004
5005					if (tp1->snd_count < 2) {
5006						/*
5007						 * True non-retransmited
5008						 * chunk
5009						 */
5010						tp1->whoTo->net_ack2 +=
5011						    tp1->send_size;
5012
5013						/* update RTO too? */
5014						if (tp1->do_rtt) {
5015							tp1->whoTo->RTO =
5016							    sctp_calculate_rto(stcb,
5017							    asoc, tp1->whoTo,
5018							    &tp1->sent_rcv_time,
5019							    sctp_align_safe_nocopy);
5020							tp1->do_rtt = 0;
5021						}
5022					}
5023					/*
5024					 * CMT: CUCv2 algorithm. From the
5025					 * cumack'd TSNs, for each TSN being
5026					 * acked for the first time, set the
5027					 * following variables for the
5028					 * corresp destination.
5029					 * new_pseudo_cumack will trigger a
5030					 * cwnd update.
5031					 * find_(rtx_)pseudo_cumack will
5032					 * trigger search for the next
5033					 * expected (rtx-)pseudo-cumack.
5034					 */
5035					tp1->whoTo->new_pseudo_cumack = 1;
5036					tp1->whoTo->find_pseudo_cumack = 1;
5037					tp1->whoTo->find_rtx_pseudo_cumack = 1;
5038
5039
5040					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5041						sctp_log_sack(asoc->last_acked_seq,
5042						    cum_ack,
5043						    tp1->rec.data.TSN_seq,
5044						    0,
5045						    0,
5046						    SCTP_LOG_TSN_ACKED);
5047					}
5048					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5049						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5050					}
5051				}
5052				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5053					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5054#ifdef SCTP_AUDITING_ENABLED
5055					sctp_audit_log(0xB3,
5056					    (asoc->sent_queue_retran_cnt & 0x000000ff));
5057#endif
5058				}
5059				if (tp1->rec.data.chunk_was_revoked) {
5060					/* deflate the cwnd */
5061					tp1->whoTo->cwnd -= tp1->book_size;
5062					tp1->rec.data.chunk_was_revoked = 0;
5063				}
5064				tp1->sent = SCTP_DATAGRAM_ACKED;
5065			}
5066		} else {
5067			break;
5068		}
5069		tp1 = TAILQ_NEXT(tp1, sctp_next);
5070	}
5071	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5072	/* always set this up to cum-ack */
5073	asoc->this_sack_highest_gap = last_tsn;
5074
5075	/* Move offset up to point to gaps/dups */
5076	offset += sizeof(struct sctp_sack_chunk);
5077	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5078
5079		/* skip corrupt segments */
5080		goto skip_segments;
5081	}
5082	if (num_seg > 0) {
5083
5084		/*
5085		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5086		 * to be greater than the cumack. Also reset saw_newack to 0
5087		 * for all dests.
5088		 */
5089		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5090			net->saw_newack = 0;
5091			net->this_sack_highest_newack = last_tsn;
5092		}
5093
5094		/*
5095		 * thisSackHighestGap will increase while handling NEW
5096		 * segments this_sack_highest_newack will increase while
5097		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
5098		 * used for CMT DAC algo. saw_newack will also change.
5099		 */
5100		sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5101		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5102		    num_seg, &ecn_seg_sums);
5103
5104		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5105			/*
5106			 * validate the biggest_tsn_acked in the gap acks if
5107			 * strict adherence is wanted.
5108			 */
5109			if ((biggest_tsn_acked == send_s) ||
5110			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5111				/*
5112				 * peer is either confused or we are under
5113				 * attack. We must abort.
5114				 */
5115				goto hopeless_peer;
5116			}
5117		}
5118	}
5119skip_segments:
5120	/*******************************************/
5121	/* cancel ALL T3-send timer if accum moved */
5122	/*******************************************/
5123	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5124		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5125			if (net->new_pseudo_cumack)
5126				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5127				    stcb, net,
5128				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5129
5130		}
5131	} else {
5132		if (accum_moved) {
5133			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5134				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5135				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5136			}
5137		}
5138	}
5139	/********************************************/
5140	/* drop the acked chunks from the sendqueue */
5141	/********************************************/
5142	asoc->last_acked_seq = cum_ack;
5143
5144	tp1 = TAILQ_FIRST(&asoc->sent_queue);
5145	if (tp1 == NULL)
5146		goto done_with_it;
5147	do {
5148		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5149		    MAX_TSN)) {
5150			break;
5151		}
5152		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5153			/* no more sent on list */
5154			printf("Warning, tp1->sent == %d and its now acked?\n",
5155			    tp1->sent);
5156		}
5157		tp2 = TAILQ_NEXT(tp1, sctp_next);
5158		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5159		if (tp1->pr_sctp_on) {
5160			if (asoc->pr_sctp_cnt != 0)
5161				asoc->pr_sctp_cnt--;
5162		}
5163		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5164		    (asoc->total_flight > 0)) {
5165#ifdef INVARIANTS
5166			panic("Warning flight size is postive and should be 0");
5167#else
5168			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
5169			    asoc->total_flight);
5170#endif
5171			asoc->total_flight = 0;
5172		}
5173		if (tp1->data) {
5174			/* sa_ignore NO_NULL_CHK */
5175			sctp_free_bufspace(stcb, asoc, tp1, 1);
5176			sctp_m_freem(tp1->data);
5177			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5178				asoc->sent_queue_cnt_removeable--;
5179			}
5180		}
5181		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5182			sctp_log_sack(asoc->last_acked_seq,
5183			    cum_ack,
5184			    tp1->rec.data.TSN_seq,
5185			    0,
5186			    0,
5187			    SCTP_LOG_FREE_SENT);
5188		}
5189		tp1->data = NULL;
5190		asoc->sent_queue_cnt--;
5191		sctp_free_a_chunk(stcb, tp1);
5192		wake_him++;
5193		tp1 = tp2;
5194	} while (tp1 != NULL);
5195
5196done_with_it:
5197	/* sa_ignore NO_NULL_CHK */
5198	if ((wake_him) && (stcb->sctp_socket)) {
5199#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5200		struct socket *so;
5201
5202#endif
5203		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5204		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5205			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5206		}
5207#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5208		so = SCTP_INP_SO(stcb->sctp_ep);
5209		atomic_add_int(&stcb->asoc.refcnt, 1);
5210		SCTP_TCB_UNLOCK(stcb);
5211		SCTP_SOCKET_LOCK(so, 1);
5212		SCTP_TCB_LOCK(stcb);
5213		atomic_subtract_int(&stcb->asoc.refcnt, 1);
5214		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5215			/* assoc was freed while we were unlocked */
5216			SCTP_SOCKET_UNLOCK(so, 1);
5217			return;
5218		}
5219#endif
5220		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5221#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5222		SCTP_SOCKET_UNLOCK(so, 1);
5223#endif
5224	} else {
5225		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5226			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5227		}
5228	}
5229
5230	if (asoc->fast_retran_loss_recovery && accum_moved) {
5231		if (compare_with_wrap(asoc->last_acked_seq,
5232		    asoc->fast_recovery_tsn, MAX_TSN) ||
5233		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5234			/* Setup so we will exit RFC2582 fast recovery */
5235			will_exit_fast_recovery = 1;
5236		}
5237	}
5238	/*
5239	 * Check for revoked fragments:
5240	 *
5241	 * if Previous sack - Had no frags then we can't have any revoked if
5242	 * Previous sack - Had frag's then - If we now have frags aka
5243	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
5244	 * some of them. else - The peer revoked all ACKED fragments, since
5245	 * we had some before and now we have NONE.
5246	 */
5247
5248	if (num_seg)
5249		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5250	else if (asoc->saw_sack_with_frags) {
5251		int cnt_revoked = 0;
5252
5253		tp1 = TAILQ_FIRST(&asoc->sent_queue);
5254		if (tp1 != NULL) {
5255			/* Peer revoked all dg's marked or acked */
5256			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5257				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5258				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5259					tp1->sent = SCTP_DATAGRAM_SENT;
5260					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5261						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5262						    tp1->whoTo->flight_size,
5263						    tp1->book_size,
5264						    (uintptr_t) tp1->whoTo,
5265						    tp1->rec.data.TSN_seq);
5266					}
5267					sctp_flight_size_increase(tp1);
5268					sctp_total_flight_increase(stcb, tp1);
5269					tp1->rec.data.chunk_was_revoked = 1;
5270					/*
5271					 * To ensure that this increase in
5272					 * flightsize, which is artificial,
5273					 * does not throttle the sender, we
5274					 * also increase the cwnd
5275					 * artificially.
5276					 */
5277					tp1->whoTo->cwnd += tp1->book_size;
5278					cnt_revoked++;
5279				}
5280			}
5281			if (cnt_revoked) {
5282				reneged_all = 1;
5283			}
5284		}
5285		asoc->saw_sack_with_frags = 0;
5286	}
5287	if (num_seg)
5288		asoc->saw_sack_with_frags = 1;
5289	else
5290		asoc->saw_sack_with_frags = 0;
5291
5292	/* JRS - Use the congestion control given in the CC module */
5293	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5294
5295	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5296		/* nothing left in-flight */
5297		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5298			/* stop all timers */
5299			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5300				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5301					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5302					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5303					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5304				}
5305			}
5306			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5307			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5308			net->flight_size = 0;
5309			net->partial_bytes_acked = 0;
5310		}
5311		asoc->total_flight = 0;
5312		asoc->total_flight_count = 0;
5313	}
5314	/**********************************/
5315	/* Now what about shutdown issues */
5316	/**********************************/
5317	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5318		/* nothing left on sendqueue.. consider done */
5319		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5320			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5321			    asoc->peers_rwnd, 0, 0, a_rwnd);
5322		}
5323		asoc->peers_rwnd = a_rwnd;
5324		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5325			/* SWS sender side engages */
5326			asoc->peers_rwnd = 0;
5327		}
5328		/* clean up */
5329		if ((asoc->stream_queue_cnt == 1) &&
5330		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5331		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5332		    (asoc->locked_on_sending)
5333		    ) {
5334			struct sctp_stream_queue_pending *sp;
5335
5336			/*
5337			 * I may be in a state where we got all across.. but
5338			 * cannot write more due to a shutdown... we abort
5339			 * since the user did not indicate EOR in this case.
5340			 */
5341			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5342			    sctp_streamhead);
5343			if ((sp) && (sp->length == 0)) {
5344				asoc->locked_on_sending = NULL;
5345				if (sp->msg_is_complete) {
5346					asoc->stream_queue_cnt--;
5347				} else {
5348					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5349					asoc->stream_queue_cnt--;
5350				}
5351			}
5352		}
5353		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5354		    (asoc->stream_queue_cnt == 0)) {
5355			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5356				/* Need to abort here */
5357				struct mbuf *oper;
5358
5359		abort_out_now:
5360				*abort_now = 1;
5361				/* XXX */
5362				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5363				    0, M_DONTWAIT, 1, MT_DATA);
5364				if (oper) {
5365					struct sctp_paramhdr *ph;
5366					uint32_t *ippp;
5367
5368					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5369					    sizeof(uint32_t);
5370					ph = mtod(oper, struct sctp_paramhdr *);
5371					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5372					ph->param_length = htons(SCTP_BUF_LEN(oper));
5373					ippp = (uint32_t *) (ph + 1);
5374					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5375				}
5376				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5377				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5378				return;
5379			} else {
5380				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5381				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5382					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5383				}
5384				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5385				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5386				sctp_stop_timers_for_shutdown(stcb);
5387				sctp_send_shutdown(stcb,
5388				    stcb->asoc.primary_destination);
5389				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5390				    stcb->sctp_ep, stcb, asoc->primary_destination);
5391				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5392				    stcb->sctp_ep, stcb, asoc->primary_destination);
5393			}
5394			return;
5395		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5396		    (asoc->stream_queue_cnt == 0)) {
5397			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5398				goto abort_out_now;
5399			}
5400			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5401			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5402			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5403			sctp_send_shutdown_ack(stcb,
5404			    stcb->asoc.primary_destination);
5405
5406			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5407			    stcb->sctp_ep, stcb, asoc->primary_destination);
5408			return;
5409		}
5410	}
5411	/*
5412	 * Now here we are going to recycle net_ack for a different use...
5413	 * HEADS UP.
5414	 */
5415	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5416		net->net_ack = 0;
5417	}
5418
5419	/*
5420	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5421	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5422	 * automatically ensure that.
5423	 */
5424	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5425		this_sack_lowest_newack = cum_ack;
5426	}
5427	if (num_seg > 0) {
5428		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5429		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5430	}
5431	/* JRS - Use the congestion control given in the CC module */
5432	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5433
5434	/******************************************************************
5435	 *  Here we do the stuff with ECN Nonce checking.
5436	 *  We basically check to see if the nonce sum flag was incorrect
5437	 *  or if resynchronization needs to be done. Also if we catch a
5438	 *  misbehaving receiver we give him the kick.
5439	 ******************************************************************/
5440
5441	if (asoc->ecn_nonce_allowed) {
5442		if (asoc->nonce_sum_check) {
5443			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5444				if (asoc->nonce_wait_for_ecne == 0) {
5445					struct sctp_tmit_chunk *lchk;
5446
5447					lchk = TAILQ_FIRST(&asoc->send_queue);
5448					asoc->nonce_wait_for_ecne = 1;
5449					if (lchk) {
5450						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5451					} else {
5452						asoc->nonce_wait_tsn = asoc->sending_seq;
5453					}
5454				} else {
5455					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5456					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5457						/*
5458						 * Misbehaving peer. We need
5459						 * to react to this guy
5460						 */
5461						asoc->ecn_allowed = 0;
5462						asoc->ecn_nonce_allowed = 0;
5463					}
5464				}
5465			}
5466		} else {
5467			/* See if Resynchronization Possible */
5468			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5469				asoc->nonce_sum_check = 1;
5470				/*
5471				 * now we must calculate what the base is.
5472				 * We do this based on two things, we know
5473				 * the total's for all the segments
5474				 * gap-acked in the SACK, its stored in
5475				 * ecn_seg_sums. We also know the SACK's
5476				 * nonce sum, its in nonce_sum_flag. So we
5477				 * can build a truth table to back-calculate
5478				 * the new value of
5479				 * asoc->nonce_sum_expect_base:
5480				 *
5481				 * SACK-flag-Value         Seg-Sums Base 0 0 0
5482				 * 1                    0 1 0 1 1 1
5483				 * 1 0
5484				 */
5485				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5486			}
5487		}
5488	}
5489	/* Now are we exiting loss recovery ? */
5490	if (will_exit_fast_recovery) {
5491		/* Ok, we must exit fast recovery */
5492		asoc->fast_retran_loss_recovery = 0;
5493	}
5494	if ((asoc->sat_t3_loss_recovery) &&
5495	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5496	    MAX_TSN) ||
5497	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5498		/* end satellite t3 loss recovery */
5499		asoc->sat_t3_loss_recovery = 0;
5500	}
5501	/*
5502	 * CMT Fast recovery
5503	 */
5504	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5505		if (net->will_exit_fast_recovery) {
5506			/* Ok, we must exit fast recovery */
5507			net->fast_retran_loss_recovery = 0;
5508		}
5509	}
5510
5511	/* Adjust and set the new rwnd value */
5512	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5513		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5514		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5515	}
5516	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5517	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5518	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5519		/* SWS sender side engages */
5520		asoc->peers_rwnd = 0;
5521	}
5522	if (asoc->peers_rwnd > old_rwnd) {
5523		win_probe_recovery = 1;
5524	}
5525	/*
5526	 * Now we must setup so we have a timer up for anyone with
5527	 * outstanding data.
5528	 */
5529	done_once = 0;
5530again:
5531	j = 0;
5532	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5533		if (win_probe_recovery && (net->window_probe)) {
5534			win_probe_recovered = 1;
5535			/*-
5536			 * Find first chunk that was used with
5537			 * window probe and clear the event. Put
5538			 * it back into the send queue as if has
5539			 * not been sent.
5540			 */
5541			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5542				if (tp1->window_probe) {
5543					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5544					break;
5545				}
5546			}
5547		}
5548		if (net->flight_size) {
5549			j++;
5550			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5551			    stcb->sctp_ep, stcb, net);
5552			if (net->window_probe) {
5553			}
5554		} else {
5555			if (net->window_probe) {
5556				/*
5557				 * In window probes we must assure a timer
5558				 * is still running there
5559				 */
5560
5561				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5562					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5563					    stcb->sctp_ep, stcb, net);
5564
5565				}
5566			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5567				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5568				    stcb, net,
5569				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5570			}
5571			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5572				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5573					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5574					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5575					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5576				}
5577			}
5578		}
5579	}
5580	if ((j == 0) &&
5581	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5582	    (asoc->sent_queue_retran_cnt == 0) &&
5583	    (win_probe_recovered == 0) &&
5584	    (done_once == 0)) {
5585		/*
5586		 * huh, this should not happen unless all packets are
5587		 * PR-SCTP and marked to skip of course.
5588		 */
5589		if (sctp_fs_audit(asoc)) {
5590			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5591				net->flight_size = 0;
5592			}
5593			asoc->total_flight = 0;
5594			asoc->total_flight_count = 0;
5595			asoc->sent_queue_retran_cnt = 0;
5596			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5597				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5598					sctp_flight_size_increase(tp1);
5599					sctp_total_flight_increase(stcb, tp1);
5600				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5601					asoc->sent_queue_retran_cnt++;
5602				}
5603			}
5604		}
5605		done_once = 1;
5606		goto again;
5607	}
5608	/* Fix up the a-p-a-p for future PR-SCTP sends */
5609	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5610		asoc->advanced_peer_ack_point = cum_ack;
5611	}
5612	/* C2. try to further move advancedPeerAckPoint ahead */
5613	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5614		struct sctp_tmit_chunk *lchk;
5615		uint32_t old_adv_peer_ack_point;
5616
5617		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5618		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5619		/* C3. See if we need to send a Fwd-TSN */
5620		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5621		    MAX_TSN)) {
5622			/*
5623			 * ISSUE with ECN, see FWD-TSN processing for notes
5624			 * on issues that will occur when the ECN NONCE
5625			 * stuff is put into SCTP for cross checking.
5626			 */
5627			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5628				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5629				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5630				    old_adv_peer_ack_point);
5631			}
5632			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5633			    MAX_TSN)) {
5634				send_forward_tsn(stcb, asoc);
5635				/*
5636				 * ECN Nonce: Disable Nonce Sum check when
5637				 * FWD TSN is sent and store resync tsn
5638				 */
5639				asoc->nonce_sum_check = 0;
5640				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5641			} else if (lchk) {
5642				/* try to FR fwd-tsn's that get lost too */
5643				lchk->rec.data.fwd_tsn_cnt++;
5644				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5645					send_forward_tsn(stcb, asoc);
5646					lchk->rec.data.fwd_tsn_cnt = 0;
5647				}
5648			}
5649		}
5650		if (lchk) {
5651			/* Assure a timer is up */
5652			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5653			    stcb->sctp_ep, stcb, lchk->whoTo);
5654		}
5655	}
5656	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5657		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5658		    a_rwnd,
5659		    stcb->asoc.peers_rwnd,
5660		    stcb->asoc.total_flight,
5661		    stcb->asoc.total_output_queue_size);
5662	}
5663}
5664
5665void
5666sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5667    struct sctp_nets *netp, int *abort_flag)
5668{
5669	/* Copy cum-ack */
5670	uint32_t cum_ack, a_rwnd;
5671
5672	cum_ack = ntohl(cp->cumulative_tsn_ack);
5673	/* Arrange so a_rwnd does NOT change */
5674	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5675
5676	/* Now call the express sack handling */
5677	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5678}
5679
5680static void
5681sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5682    struct sctp_stream_in *strmin)
5683{
5684	struct sctp_queued_to_read *ctl, *nctl;
5685	struct sctp_association *asoc;
5686	int tt;
5687
5688	/* EY -used to calculate nr_gap information */
5689	uint32_t nr_tsn, nr_gap;
5690
5691	asoc = &stcb->asoc;
5692	tt = strmin->last_sequence_delivered;
5693	/*
5694	 * First deliver anything prior to and including the stream no that
5695	 * came in
5696	 */
5697	ctl = TAILQ_FIRST(&strmin->inqueue);
5698	while (ctl) {
5699		nctl = TAILQ_NEXT(ctl, next);
5700		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5701		    (tt == ctl->sinfo_ssn)) {
5702			/* this is deliverable now */
5703			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5704			/* subtract pending on streams */
5705			asoc->size_on_all_streams -= ctl->length;
5706			sctp_ucount_decr(asoc->cnt_on_all_streams);
5707			/* deliver it to at least the delivery-q */
5708			if (stcb->sctp_socket) {
5709				/* EY need the tsn info for calculating nr */
5710				nr_tsn = ctl->sinfo_tsn;
5711				sctp_add_to_readq(stcb->sctp_ep, stcb,
5712				    ctl,
5713				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5714				/*
5715				 * EY this is the chunk that should be
5716				 * tagged nr gapped calculate the gap and
5717				 * such then tag this TSN nr
5718				 * chk->rec.data.TSN_seq
5719				 */
5720				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5721
5722					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5723						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5724					} else {
5725						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5726					}
5727					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5728					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5729						/*
5730						 * EY These should never
5731						 * happen- explained before
5732						 */
5733					} else {
5734						SCTP_TCB_LOCK_ASSERT(stcb);
5735						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5736						if (compare_with_wrap(nr_tsn,
5737						    asoc->highest_tsn_inside_nr_map,
5738						    MAX_TSN))
5739							asoc->highest_tsn_inside_nr_map = nr_tsn;
5740					}
5741
5742					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5743						/*
5744						 * printf("In
5745						 * sctp_kick_prsctp_reorder_q
5746						 * ueue(7): Something wrong,
5747						 * the TSN to be tagged"
5748						 * "\nas NR is not even in
5749						 * the mapping_array, or map
5750						 * and nr_map are
5751						 * inconsistent");
5752						 */
5753						/*
5754						 * EY - not %100 sure about
5755						 * the lock thing, don't
5756						 * think its required
5757						 */
5758						/*
5759						 * SCTP_TCB_LOCK_ASSERT(stcb)
5760						 * ;
5761						 */
5762					{
5763						/*
5764						 * printf("\nCalculating an
5765						 * nr_gap!!\nmapping_array_si
5766						 * ze = %d
5767						 * nr_mapping_array_size =
5768						 * %d" "\nmapping_array_base
5769						 * = %d
5770						 * nr_mapping_array_base =
5771						 * %d\nhighest_tsn_inside_map
5772						 *  = %d"
5773						 * "highest_tsn_inside_nr_map
5774						 *  = %d\nTSN = %d nr_gap =
5775						 * %d",asoc->mapping_array_si
5776						 * ze,
5777						 * asoc->nr_mapping_array_siz
5778						 * e,
5779						 * asoc->mapping_array_base_t
5780						 * sn,
5781						 * asoc->nr_mapping_array_bas
5782						 * e_tsn,
5783						 * asoc->highest_tsn_inside_m
5784						 * ap,
5785						 * asoc->highest_tsn_inside_n
5786						 * r_map,tsn,nr_gap);
5787						 */
5788					}
5789				}
5790			}
5791		} else {
5792			/* no more delivery now. */
5793			break;
5794		}
5795		ctl = nctl;
5796	}
5797	/*
5798	 * now we must deliver things in queue the normal way  if any are
5799	 * now ready.
5800	 */
5801	tt = strmin->last_sequence_delivered + 1;
5802	ctl = TAILQ_FIRST(&strmin->inqueue);
5803	while (ctl) {
5804		nctl = TAILQ_NEXT(ctl, next);
5805		if (tt == ctl->sinfo_ssn) {
5806			/* this is deliverable now */
5807			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5808			/* subtract pending on streams */
5809			asoc->size_on_all_streams -= ctl->length;
5810			sctp_ucount_decr(asoc->cnt_on_all_streams);
5811			/* deliver it to at least the delivery-q */
5812			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5813			if (stcb->sctp_socket) {
5814				/* EY */
5815				nr_tsn = ctl->sinfo_tsn;
5816				sctp_add_to_readq(stcb->sctp_ep, stcb,
5817				    ctl,
5818				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5819				/*
5820				 * EY this is the chunk that should be
5821				 * tagged nr gapped calculate the gap and
5822				 * such then tag this TSN nr
5823				 * chk->rec.data.TSN_seq
5824				 */
5825				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5826
5827					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5828						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5829					} else {
5830						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5831					}
5832					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5833					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5834						/*
5835						 * EY These should never
5836						 * happen, explained before
5837						 */
5838					} else {
5839						SCTP_TCB_LOCK_ASSERT(stcb);
5840						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5841						if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
5842						    MAX_TSN))
5843							asoc->highest_tsn_inside_nr_map = nr_tsn;
5844					}
5845
5846
5847					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5848						/*
5849						 * printf("In
5850						 * sctp_kick_prsctp_reorder_q
5851						 * ueue(8): Something wrong,
5852						 * the TSN to be tagged"
5853						 * "\nas NR is not even in
5854						 * the mapping_array, or map
5855						 * and nr_map are
5856						 * inconsistent");
5857						 */
5858						/*
5859						 * EY - not %100 sure about
5860						 * the lock thing, don't
5861						 * think its required
5862						 */
5863						/*
5864						 * SCTP_TCB_LOCK_ASSERT(stcb)
5865						 * ;
5866						 */
5867					{
5868						/*
5869						 * printf("\nCalculating an
5870						 * nr_gap!!\nmapping_array_si
5871						 * ze = %d
5872						 * nr_mapping_array_size =
5873						 * %d" "\nmapping_array_base
5874						 * = %d
5875						 * nr_mapping_array_base =
5876						 * %d\nhighest_tsn_inside_map
5877						 *  = %d"
5878						 * "highest_tsn_inside_nr_map
5879						 *  = %d\nTSN = %d nr_gap =
5880						 * %d",asoc->mapping_array_si
5881						 * ze,
5882						 * asoc->nr_mapping_array_siz
5883						 * e,
5884						 * asoc->mapping_array_base_t
5885						 * sn,
5886						 * asoc->nr_mapping_array_bas
5887						 * e_tsn,
5888						 * asoc->highest_tsn_inside_m
5889						 * ap,
5890						 * asoc->highest_tsn_inside_n
5891						 * r_map,tsn,nr_gap);
5892						 */
5893					}
5894				}
5895			}
5896			tt = strmin->last_sequence_delivered + 1;
5897		} else {
5898			break;
5899		}
5900		ctl = nctl;
5901	}
5902}
5903
5904static void
5905sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5906    struct sctp_association *asoc,
5907    uint16_t stream, uint16_t seq)
5908{
5909	struct sctp_tmit_chunk *chk, *at;
5910
5911	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5912		/* For each one on here see if we need to toss it */
5913		/*
5914		 * For now large messages held on the reasmqueue that are
5915		 * complete will be tossed too. We could in theory do more
5916		 * work to spin through and stop after dumping one msg aka
5917		 * seeing the start of a new msg at the head, and call the
5918		 * delivery function... to see if it can be delivered... But
5919		 * for now we just dump everything on the queue.
5920		 */
5921		chk = TAILQ_FIRST(&asoc->reasmqueue);
5922		while (chk) {
5923			at = TAILQ_NEXT(chk, sctp_next);
5924			if (chk->rec.data.stream_number != stream) {
5925				chk = at;
5926				continue;
5927			}
5928			if (chk->rec.data.stream_seq == seq) {
5929				/* It needs to be tossed */
5930				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5931				if (compare_with_wrap(chk->rec.data.TSN_seq,
5932				    asoc->tsn_last_delivered, MAX_TSN)) {
5933					asoc->tsn_last_delivered =
5934					    chk->rec.data.TSN_seq;
5935					asoc->str_of_pdapi =
5936					    chk->rec.data.stream_number;
5937					asoc->ssn_of_pdapi =
5938					    chk->rec.data.stream_seq;
5939					asoc->fragment_flags =
5940					    chk->rec.data.rcv_flags;
5941				}
5942				asoc->size_on_reasm_queue -= chk->send_size;
5943				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5944
5945				/* Clear up any stream problem */
5946				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5947				    SCTP_DATA_UNORDERED &&
5948				    (compare_with_wrap(chk->rec.data.stream_seq,
5949				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5950				    MAX_SEQ))) {
5951					/*
5952					 * We must dump forward this streams
5953					 * sequence number if the chunk is
5954					 * not unordered that is being
5955					 * skipped. There is a chance that
5956					 * if the peer does not include the
5957					 * last fragment in its FWD-TSN we
5958					 * WILL have a problem here since
5959					 * you would have a partial chunk in
5960					 * queue that may not be
5961					 * deliverable. Also if a Partial
5962					 * delivery API as started the user
5963					 * may get a partial chunk. The next
5964					 * read returning a new chunk...
5965					 * really ugly but I see no way
5966					 * around it! Maybe a notify??
5967					 */
5968					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5969					    chk->rec.data.stream_seq;
5970				}
5971				if (chk->data) {
5972					sctp_m_freem(chk->data);
5973					chk->data = NULL;
5974				}
5975				sctp_free_a_chunk(stcb, chk);
5976			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5977				/*
5978				 * If the stream_seq is > than the purging
5979				 * one, we are done
5980				 */
5981				break;
5982			}
5983			chk = at;
5984		}
5985	}
5986}
5987
5988
5989void
5990sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5991    struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
5992{
5993	/*
5994	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5995	 * forward TSN, when the SACK comes back that acknowledges the
5996	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5997	 * get quite tricky since we may have sent more data interveneing
5998	 * and must carefully account for what the SACK says on the nonce
5999	 * and any gaps that are reported. This work will NOT be done here,
6000	 * but I note it here since it is really related to PR-SCTP and
6001	 * FWD-TSN's
6002	 */
6003
6004	/* The pr-sctp fwd tsn */
6005	/*
6006	 * here we will perform all the data receiver side steps for
6007	 * processing FwdTSN, as required in by pr-sctp draft:
6008	 *
6009	 * Assume we get FwdTSN(x):
6010	 *
6011	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
6012	 * others we have 3) examine and update re-ordering queue on
6013	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
6014	 * report where we are.
6015	 */
6016	struct sctp_association *asoc;
6017	uint32_t new_cum_tsn, gap;
6018	unsigned int i, fwd_sz, cumack_set_flag, m_size;
6019	uint32_t str_seq;
6020	struct sctp_stream_in *strm;
6021	struct sctp_tmit_chunk *chk, *at;
6022	struct sctp_queued_to_read *ctl, *sv;
6023
6024	cumack_set_flag = 0;
6025	asoc = &stcb->asoc;
6026	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
6027		SCTPDBG(SCTP_DEBUG_INDATA1,
6028		    "Bad size too small/big fwd-tsn\n");
6029		return;
6030	}
6031	m_size = (stcb->asoc.mapping_array_size << 3);
6032	/*************************************************************/
6033	/* 1. Here we update local cumTSN and shift the bitmap array */
6034	/*************************************************************/
6035	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
6036
6037	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
6038	    asoc->cumulative_tsn == new_cum_tsn) {
6039		/* Already got there ... */
6040		return;
6041	}
6042	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
6043	    MAX_TSN)) {
6044		asoc->highest_tsn_inside_map = new_cum_tsn;
6045		/* EY nr_mapping_array version of the above */
6046		/*
6047		 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
6048		 * asoc->peer_supports_nr_sack)
6049		 */
6050		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6051		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6052			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6053		}
6054	}
6055	/*
6056	 * now we know the new TSN is more advanced, let's find the actual
6057	 * gap
6058	 */
6059	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
6060	    MAX_TSN)) ||
6061	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
6062		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
6063	} else {
6064		/* try to prevent underflow here */
6065		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
6066	}
6067
6068	if (gap >= m_size) {
6069		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6070			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6071		}
6072		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
6073			struct mbuf *oper;
6074
6075			/*
6076			 * out of range (of single byte chunks in the rwnd I
6077			 * give out). This must be an attacker.
6078			 */
6079			*abort_flag = 1;
6080			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
6081			    0, M_DONTWAIT, 1, MT_DATA);
6082			if (oper) {
6083				struct sctp_paramhdr *ph;
6084				uint32_t *ippp;
6085
6086				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6087				    (sizeof(uint32_t) * 3);
6088				ph = mtod(oper, struct sctp_paramhdr *);
6089				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6090				ph->param_length = htons(SCTP_BUF_LEN(oper));
6091				ippp = (uint32_t *) (ph + 1);
6092				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
6093				ippp++;
6094				*ippp = asoc->highest_tsn_inside_map;
6095				ippp++;
6096				*ippp = new_cum_tsn;
6097			}
6098			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6099			sctp_abort_an_association(stcb->sctp_ep, stcb,
6100			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6101			return;
6102		}
6103		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
6104		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6105		cumack_set_flag = 1;
6106		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6107		asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6108		/* EY - nr_sack: nr_mapping_array version of the above */
6109		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6110			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6111			asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6112			asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6113			if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
6114				/*
6115				 * printf("IN sctp_handle_forward_tsn:
6116				 * Something is wrong the size of" "map and
6117				 * nr_map should be equal!")
6118				 */ ;
6119			}
6120		}
6121		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6122			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6123		}
6124		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
6125	} else {
6126		SCTP_TCB_LOCK_ASSERT(stcb);
6127		for (i = 0; i <= gap; i++) {
6128			SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6129			/*
6130			 * EY if drain is off then every gap-ack is an
6131			 * nr-gap-ack
6132			 */
6133			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
6134			    && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
6135				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
6136			}
6137		}
6138		/*
6139		 * Now after marking all, slide thing forward but no sack
6140		 * please.
6141		 */
6142		sctp_sack_check(stcb, 0, 0, abort_flag);
6143		if (*abort_flag)
6144			return;
6145	}
6146	/*************************************************************/
6147	/* 2. Clear up re-assembly queue                             */
6148	/*************************************************************/
6149	/*
6150	 * First service it if pd-api is up, just in case we can progress it
6151	 * forward
6152	 */
6153	if (asoc->fragmented_delivery_inprogress) {
6154		sctp_service_reassembly(stcb, asoc);
6155	}
6156	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6157		/* For each one on here see if we need to toss it */
6158		/*
6159		 * For now large messages held on the reasmqueue that are
6160		 * complete will be tossed too. We could in theory do more
6161		 * work to spin through and stop after dumping one msg aka
6162		 * seeing the start of a new msg at the head, and call the
6163		 * delivery function... to see if it can be delivered... But
6164		 * for now we just dump everything on the queue.
6165		 */
6166		chk = TAILQ_FIRST(&asoc->reasmqueue);
6167		while (chk) {
6168			at = TAILQ_NEXT(chk, sctp_next);
6169			if ((compare_with_wrap(new_cum_tsn,
6170			    chk->rec.data.TSN_seq, MAX_TSN)) ||
6171			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
6172				/* It needs to be tossed */
6173				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6174				if (compare_with_wrap(chk->rec.data.TSN_seq,
6175				    asoc->tsn_last_delivered, MAX_TSN)) {
6176					asoc->tsn_last_delivered =
6177					    chk->rec.data.TSN_seq;
6178					asoc->str_of_pdapi =
6179					    chk->rec.data.stream_number;
6180					asoc->ssn_of_pdapi =
6181					    chk->rec.data.stream_seq;
6182					asoc->fragment_flags =
6183					    chk->rec.data.rcv_flags;
6184				}
6185				asoc->size_on_reasm_queue -= chk->send_size;
6186				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6187
6188				/* Clear up any stream problem */
6189				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6190				    SCTP_DATA_UNORDERED &&
6191				    (compare_with_wrap(chk->rec.data.stream_seq,
6192				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6193				    MAX_SEQ))) {
6194					/*
6195					 * We must dump forward this streams
6196					 * sequence number if the chunk is
6197					 * not unordered that is being
6198					 * skipped. There is a chance that
6199					 * if the peer does not include the
6200					 * last fragment in its FWD-TSN we
6201					 * WILL have a problem here since
6202					 * you would have a partial chunk in
6203					 * queue that may not be
6204					 * deliverable. Also if a Partial
6205					 * delivery API as started the user
6206					 * may get a partial chunk. The next
6207					 * read returning a new chunk...
6208					 * really ugly but I see no way
6209					 * around it! Maybe a notify??
6210					 */
6211					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6212					    chk->rec.data.stream_seq;
6213				}
6214				if (chk->data) {
6215					sctp_m_freem(chk->data);
6216					chk->data = NULL;
6217				}
6218				sctp_free_a_chunk(stcb, chk);
6219			} else {
6220				/*
6221				 * Ok we have gone beyond the end of the
6222				 * fwd-tsn's mark.
6223				 */
6224				break;
6225			}
6226			chk = at;
6227		}
6228	}
6229	/*******************************************************/
6230	/* 3. Update the PR-stream re-ordering queues and fix  */
6231	/* delivery issues as needed.                       */
6232	/*******************************************************/
6233	fwd_sz -= sizeof(*fwd);
6234	if (m && fwd_sz) {
6235		/* New method. */
6236		unsigned int num_str;
6237		struct sctp_strseq *stseq, strseqbuf;
6238
6239		offset += sizeof(*fwd);
6240
6241		SCTP_INP_READ_LOCK(stcb->sctp_ep);
6242		num_str = fwd_sz / sizeof(struct sctp_strseq);
6243		for (i = 0; i < num_str; i++) {
6244			uint16_t st;
6245
6246			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6247			    sizeof(struct sctp_strseq),
6248			    (uint8_t *) & strseqbuf);
6249			offset += sizeof(struct sctp_strseq);
6250			if (stseq == NULL) {
6251				break;
6252			}
6253			/* Convert */
6254			st = ntohs(stseq->stream);
6255			stseq->stream = st;
6256			st = ntohs(stseq->sequence);
6257			stseq->sequence = st;
6258
6259			/* now process */
6260
6261			/*
6262			 * Ok we now look for the stream/seq on the read
6263			 * queue where its not all delivered. If we find it
6264			 * we transmute the read entry into a PDI_ABORTED.
6265			 */
6266			if (stseq->stream >= asoc->streamincnt) {
6267				/* screwed up streams, stop!  */
6268				break;
6269			}
6270			if ((asoc->str_of_pdapi == stseq->stream) &&
6271			    (asoc->ssn_of_pdapi == stseq->sequence)) {
6272				/*
6273				 * If this is the one we were partially
6274				 * delivering now then we no longer are.
6275				 * Note this will change with the reassembly
6276				 * re-write.
6277				 */
6278				asoc->fragmented_delivery_inprogress = 0;
6279			}
6280			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
6281			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
6282				if ((ctl->sinfo_stream == stseq->stream) &&
6283				    (ctl->sinfo_ssn == stseq->sequence)) {
6284					str_seq = (stseq->stream << 16) | stseq->sequence;
6285					ctl->end_added = 1;
6286					ctl->pdapi_aborted = 1;
6287					sv = stcb->asoc.control_pdapi;
6288					stcb->asoc.control_pdapi = ctl;
6289					sctp_notify_partial_delivery_indication(stcb,
6290					    SCTP_PARTIAL_DELIVERY_ABORTED,
6291					    SCTP_HOLDS_LOCK,
6292					    str_seq);
6293					stcb->asoc.control_pdapi = sv;
6294					break;
6295				} else if ((ctl->sinfo_stream == stseq->stream) &&
6296				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
6297					/* We are past our victim SSN */
6298					break;
6299				}
6300			}
6301			strm = &asoc->strmin[stseq->stream];
6302			if (compare_with_wrap(stseq->sequence,
6303			    strm->last_sequence_delivered, MAX_SEQ)) {
6304				/* Update the sequence number */
6305				strm->last_sequence_delivered =
6306				    stseq->sequence;
6307			}
6308			/* now kick the stream the new way */
6309			/* sa_ignore NO_NULL_CHK */
6310			sctp_kick_prsctp_reorder_queue(stcb, strm);
6311		}
6312		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
6313	}
6314	if (TAILQ_FIRST(&asoc->reasmqueue)) {
6315		/* now lets kick out and check for more fragmented delivery */
6316		/* sa_ignore NO_NULL_CHK */
6317		sctp_deliver_reasm_check(stcb, &stcb->asoc);
6318	}
6319}
6320
6321/* EY fully identical to sctp_express_handle_sack, duplicated for only naming convention */
6322void
6323sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6324    uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6325{
6326	struct sctp_nets *net;
6327	struct sctp_association *asoc;
6328	struct sctp_tmit_chunk *tp1, *tp2;
6329	uint32_t old_rwnd;
6330	int win_probe_recovery = 0;
6331	int win_probe_recovered = 0;
6332	int j, done_once = 0;
6333
6334	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6335		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6336		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6337	}
6338	SCTP_TCB_LOCK_ASSERT(stcb);
6339#ifdef SCTP_ASOCLOG_OF_TSNS
6340	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6341	stcb->asoc.cumack_log_at++;
6342	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
6343		stcb->asoc.cumack_log_at = 0;
6344	}
6345#endif
6346	asoc = &stcb->asoc;
6347	old_rwnd = asoc->peers_rwnd;
6348	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6349		/* old ack */
6350		return;
6351	} else if (asoc->last_acked_seq == cumack) {
6352		/* Window update sack */
6353		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6354		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6355		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6356			/* SWS sender side engages */
6357			asoc->peers_rwnd = 0;
6358		}
6359		if (asoc->peers_rwnd > old_rwnd) {
6360			goto again;
6361		}
6362		return;
6363	}
6364	/* First setup for CC stuff */
6365	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6366		net->prev_cwnd = net->cwnd;
6367		net->net_ack = 0;
6368		net->net_ack2 = 0;
6369
6370		/*
6371		 * CMT: Reset CUC and Fast recovery algo variables before
6372		 * SACK processing
6373		 */
6374		net->new_pseudo_cumack = 0;
6375		net->will_exit_fast_recovery = 0;
6376	}
6377	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6378		uint32_t send_s;
6379
6380		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6381			tp1 = TAILQ_LAST(&asoc->sent_queue,
6382			    sctpchunk_listhead);
6383			send_s = tp1->rec.data.TSN_seq + 1;
6384		} else {
6385			send_s = asoc->sending_seq;
6386		}
6387		if ((cumack == send_s) ||
6388		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
6389#ifndef INVARIANTS
6390			struct mbuf *oper;
6391
6392#endif
6393#ifdef INVARIANTS
6394			panic("Impossible sack 1");
6395#else
6396			*abort_now = 1;
6397			/* XXX */
6398			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6399			    0, M_DONTWAIT, 1, MT_DATA);
6400			if (oper) {
6401				struct sctp_paramhdr *ph;
6402				uint32_t *ippp;
6403
6404				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6405				    sizeof(uint32_t);
6406				ph = mtod(oper, struct sctp_paramhdr *);
6407				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6408				ph->param_length = htons(SCTP_BUF_LEN(oper));
6409				ippp = (uint32_t *) (ph + 1);
6410				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6411			}
6412			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6413			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6414			return;
6415#endif
6416		}
6417	}
6418	asoc->this_sack_highest_gap = cumack;
6419	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6420		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6421		    stcb->asoc.overall_error_count,
6422		    0,
6423		    SCTP_FROM_SCTP_INDATA,
6424		    __LINE__);
6425	}
6426	stcb->asoc.overall_error_count = 0;
6427	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6428		/* process the new consecutive TSN first */
6429		tp1 = TAILQ_FIRST(&asoc->sent_queue);
6430		while (tp1) {
6431			tp2 = TAILQ_NEXT(tp1, sctp_next);
6432			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6433			    MAX_TSN) ||
6434			    cumack == tp1->rec.data.TSN_seq) {
6435				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6436					printf("Warning, an unsent is now acked?\n");
6437				}
6438				/*
6439				 * ECN Nonce: Add the nonce to the sender's
6440				 * nonce sum
6441				 */
6442				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6443				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6444					/*
6445					 * If it is less than ACKED, it is
6446					 * now no-longer in flight. Higher
6447					 * values may occur during marking
6448					 */
6449					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6450						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6451							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6452							    tp1->whoTo->flight_size,
6453							    tp1->book_size,
6454							    (uintptr_t) tp1->whoTo,
6455							    tp1->rec.data.TSN_seq);
6456						}
6457						sctp_flight_size_decrease(tp1);
6458						/* sa_ignore NO_NULL_CHK */
6459						sctp_total_flight_decrease(stcb, tp1);
6460					}
6461					tp1->whoTo->net_ack += tp1->send_size;
6462					if (tp1->snd_count < 2) {
6463						/*
6464						 * True non-retransmited
6465						 * chunk
6466						 */
6467						tp1->whoTo->net_ack2 +=
6468						    tp1->send_size;
6469
6470						/* update RTO too? */
6471						if (tp1->do_rtt) {
6472							tp1->whoTo->RTO =
6473							/*
6474							 * sa_ignore
6475							 * NO_NULL_CHK
6476							 */
6477							    sctp_calculate_rto(stcb,
6478							    asoc, tp1->whoTo,
6479							    &tp1->sent_rcv_time,
6480							    sctp_align_safe_nocopy);
6481							tp1->do_rtt = 0;
6482						}
6483					}
6484					/*
6485					 * CMT: CUCv2 algorithm. From the
6486					 * cumack'd TSNs, for each TSN being
6487					 * acked for the first time, set the
6488					 * following variables for the
6489					 * corresp destination.
6490					 * new_pseudo_cumack will trigger a
6491					 * cwnd update.
6492					 * find_(rtx_)pseudo_cumack will
6493					 * trigger search for the next
6494					 * expected (rtx-)pseudo-cumack.
6495					 */
6496					tp1->whoTo->new_pseudo_cumack = 1;
6497					tp1->whoTo->find_pseudo_cumack = 1;
6498					tp1->whoTo->find_rtx_pseudo_cumack = 1;
6499
6500					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6501						/* sa_ignore NO_NULL_CHK */
6502						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6503					}
6504				}
6505				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6506					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6507				}
6508				if (tp1->rec.data.chunk_was_revoked) {
6509					/* deflate the cwnd */
6510					tp1->whoTo->cwnd -= tp1->book_size;
6511					tp1->rec.data.chunk_was_revoked = 0;
6512				}
6513				tp1->sent = SCTP_DATAGRAM_ACKED;
6514				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6515				if (tp1->data) {
6516					/* sa_ignore NO_NULL_CHK */
6517					sctp_free_bufspace(stcb, asoc, tp1, 1);
6518					sctp_m_freem(tp1->data);
6519				}
6520				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6521					sctp_log_sack(asoc->last_acked_seq,
6522					    cumack,
6523					    tp1->rec.data.TSN_seq,
6524					    0,
6525					    0,
6526					    SCTP_LOG_FREE_SENT);
6527				}
6528				tp1->data = NULL;
6529				asoc->sent_queue_cnt--;
6530				sctp_free_a_chunk(stcb, tp1);
6531				tp1 = tp2;
6532			} else {
6533				break;
6534			}
6535		}
6536
6537	}
6538	/* sa_ignore NO_NULL_CHK */
6539	if (stcb->sctp_socket) {
6540#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6541		struct socket *so;
6542
6543#endif
6544
6545		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6546		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6547			/* sa_ignore NO_NULL_CHK */
6548			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6549		}
6550#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6551		so = SCTP_INP_SO(stcb->sctp_ep);
6552		atomic_add_int(&stcb->asoc.refcnt, 1);
6553		SCTP_TCB_UNLOCK(stcb);
6554		SCTP_SOCKET_LOCK(so, 1);
6555		SCTP_TCB_LOCK(stcb);
6556		atomic_subtract_int(&stcb->asoc.refcnt, 1);
6557		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6558			/* assoc was freed while we were unlocked */
6559			SCTP_SOCKET_UNLOCK(so, 1);
6560			return;
6561		}
6562#endif
6563		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6564#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6565		SCTP_SOCKET_UNLOCK(so, 1);
6566#endif
6567	} else {
6568		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6569			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6570		}
6571	}
6572
6573	/* JRS - Use the congestion control given in the CC module */
6574	if (asoc->last_acked_seq != cumack)
6575		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6576
6577	asoc->last_acked_seq = cumack;
6578
6579	if (TAILQ_EMPTY(&asoc->sent_queue)) {
6580		/* nothing left in-flight */
6581		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6582			net->flight_size = 0;
6583			net->partial_bytes_acked = 0;
6584		}
6585		asoc->total_flight = 0;
6586		asoc->total_flight_count = 0;
6587	}
6588	/* Fix up the a-p-a-p for future PR-SCTP sends */
6589	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6590		asoc->advanced_peer_ack_point = cumack;
6591	}
6592	/* ECN Nonce updates */
6593	if (asoc->ecn_nonce_allowed) {
6594		if (asoc->nonce_sum_check) {
6595			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6596				if (asoc->nonce_wait_for_ecne == 0) {
6597					struct sctp_tmit_chunk *lchk;
6598
6599					lchk = TAILQ_FIRST(&asoc->send_queue);
6600					asoc->nonce_wait_for_ecne = 1;
6601					if (lchk) {
6602						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6603					} else {
6604						asoc->nonce_wait_tsn = asoc->sending_seq;
6605					}
6606				} else {
6607					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6608					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6609						/*
6610						 * Misbehaving peer. We need
6611						 * to react to this guy
6612						 */
6613						asoc->ecn_allowed = 0;
6614						asoc->ecn_nonce_allowed = 0;
6615					}
6616				}
6617			}
6618		} else {
6619			/* See if Resynchronization Possible */
6620			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6621				asoc->nonce_sum_check = 1;
6622				/*
6623				 * now we must calculate what the base is.
6624				 * We do this based on two things, we know
6625				 * the total's for all the segments
6626				 * gap-acked in the SACK (none), We also
6627				 * know the SACK's nonce sum, its in
6628				 * nonce_sum_flag. So we can build a truth
6629				 * table to back-calculate the new value of
6630				 * asoc->nonce_sum_expect_base:
6631				 *
6632				 * SACK-flag-Value         Seg-Sums Base 0 0 0
6633				 * 1                    0 1 0 1 1 1 1 0
6634				 */
6635				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
6636			}
6637		}
6638	}
6639	/* RWND update */
6640	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6641	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6642	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6643		/* SWS sender side engages */
6644		asoc->peers_rwnd = 0;
6645	}
6646	if (asoc->peers_rwnd > old_rwnd) {
6647		win_probe_recovery = 1;
6648	}
6649	/* Now assure a timer where data is queued at */
6650again:
6651	j = 0;
6652	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6653		int to_ticks;
6654
6655		if (win_probe_recovery && (net->window_probe)) {
6656			win_probe_recovered = 1;
6657			/*
6658			 * Find first chunk that was used with window probe
6659			 * and clear the sent
6660			 */
6661			/* sa_ignore FREED_MEMORY */
6662			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6663				if (tp1->window_probe) {
6664					/* move back to data send queue */
6665					sctp_window_probe_recovery(stcb, asoc, net, tp1);
6666					break;
6667				}
6668			}
6669		}
6670		if (net->RTO == 0) {
6671			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6672		} else {
6673			to_ticks = MSEC_TO_TICKS(net->RTO);
6674		}
6675		if (net->flight_size) {
6676
6677			j++;
6678			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6679			    sctp_timeout_handler, &net->rxt_timer);
6680			if (net->window_probe) {
6681				net->window_probe = 0;
6682			}
6683		} else {
6684			if (net->window_probe) {
6685				/*
6686				 * In window probes we must assure a timer
6687				 * is still running there
6688				 */
6689				net->window_probe = 0;
6690				(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6691				    sctp_timeout_handler, &net->rxt_timer);
6692			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6693				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6694				    stcb, net,
6695				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6696			}
6697			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6698				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6699					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6700					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6701					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
6702				}
6703			}
6704		}
6705	}
6706	if ((j == 0) &&
6707	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6708	    (asoc->sent_queue_retran_cnt == 0) &&
6709	    (win_probe_recovered == 0) &&
6710	    (done_once == 0)) {
6711		/*
6712		 * huh, this should not happen unless all packets are
6713		 * PR-SCTP and marked to skip of course.
6714		 */
6715		if (sctp_fs_audit(asoc)) {
6716			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6717				net->flight_size = 0;
6718			}
6719			asoc->total_flight = 0;
6720			asoc->total_flight_count = 0;
6721			asoc->sent_queue_retran_cnt = 0;
6722			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6723				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6724					sctp_flight_size_increase(tp1);
6725					sctp_total_flight_increase(stcb, tp1);
6726				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6727					asoc->sent_queue_retran_cnt++;
6728				}
6729			}
6730		}
6731		done_once = 1;
6732		goto again;
6733	}
6734	/**********************************/
6735	/* Now what about shutdown issues */
6736	/**********************************/
6737	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6738		/* nothing left on sendqueue.. consider done */
6739		/* clean up */
6740		if ((asoc->stream_queue_cnt == 1) &&
6741		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6742		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6743		    (asoc->locked_on_sending)
6744		    ) {
6745			struct sctp_stream_queue_pending *sp;
6746
6747			/*
6748			 * I may be in a state where we got all across.. but
6749			 * cannot write more due to a shutdown... we abort
6750			 * since the user did not indicate EOR in this case.
6751			 * The sp will be cleaned during free of the asoc.
6752			 */
6753			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6754			    sctp_streamhead);
6755			if ((sp) && (sp->length == 0)) {
6756				/* Let cleanup code purge it */
6757				if (sp->msg_is_complete) {
6758					asoc->stream_queue_cnt--;
6759				} else {
6760					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6761					asoc->locked_on_sending = NULL;
6762					asoc->stream_queue_cnt--;
6763				}
6764			}
6765		}
6766		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6767		    (asoc->stream_queue_cnt == 0)) {
6768			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6769				/* Need to abort here */
6770				struct mbuf *oper;
6771
6772		abort_out_now:
6773				*abort_now = 1;
6774				/* XXX */
6775				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6776				    0, M_DONTWAIT, 1, MT_DATA);
6777				if (oper) {
6778					struct sctp_paramhdr *ph;
6779					uint32_t *ippp;
6780
6781					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6782					    sizeof(uint32_t);
6783					ph = mtod(oper, struct sctp_paramhdr *);
6784					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6785					ph->param_length = htons(SCTP_BUF_LEN(oper));
6786					ippp = (uint32_t *) (ph + 1);
6787					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6788				}
6789				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6790				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
6791			} else {
6792				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6793				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6794					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6795				}
6796				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6797				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6798				sctp_stop_timers_for_shutdown(stcb);
6799				sctp_send_shutdown(stcb,
6800				    stcb->asoc.primary_destination);
6801				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6802				    stcb->sctp_ep, stcb, asoc->primary_destination);
6803				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6804				    stcb->sctp_ep, stcb, asoc->primary_destination);
6805			}
6806		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6807		    (asoc->stream_queue_cnt == 0)) {
6808			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6809				goto abort_out_now;
6810			}
6811			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6812			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6813			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6814			sctp_send_shutdown_ack(stcb,
6815			    stcb->asoc.primary_destination);
6816
6817			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6818			    stcb->sctp_ep, stcb, asoc->primary_destination);
6819		}
6820	}
6821	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6822		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6823		    rwnd,
6824		    stcb->asoc.peers_rwnd,
6825		    stcb->asoc.total_flight,
6826		    stcb->asoc.total_output_queue_size);
6827	}
6828}
6829
6830/* EY! nr_sack version of sctp_handle_segments, nr-gapped TSNs get removed from RtxQ in this method*/
6831static void
6832sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6833    struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6834    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6835    uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6836{
6837	/************************************************/
6838	/* process fragments and update sendqueue        */
6839	/************************************************/
6840	struct sctp_nr_sack *nr_sack;
6841	struct sctp_gap_ack_block *frag, block;
6842	struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6843	struct sctp_tmit_chunk *tp1;
6844	uint32_t i, j, all_bit;
6845	int wake_him = 0;
6846	uint32_t theTSN;
6847	int num_frs = 0;
6848
6849	uint16_t frag_strt, frag_end, primary_flag_set;
6850	uint16_t nr_frag_strt, nr_frag_end;
6851
6852	uint32_t last_frag_high;
6853	uint32_t last_nr_frag_high;
6854
6855	all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
6856
6857	/*
6858	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6859	 */
6860	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6861		primary_flag_set = 1;
6862	} else {
6863		primary_flag_set = 0;
6864	}
6865	nr_sack = &ch->nr_sack;
6866
6867	/*
6868	 * EY! - I will process nr_gaps similarly,by going to this position
6869	 * again if All bit is set
6870	 */
6871	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6872	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6873	*offset += sizeof(block);
6874	if (frag == NULL) {
6875		return;
6876	}
6877	tp1 = NULL;
6878	last_frag_high = 0;
6879	for (i = 0; i < num_seg; i++) {
6880		frag_strt = ntohs(frag->start);
6881		frag_end = ntohs(frag->end);
6882		/* some sanity checks on the fargment offsets */
6883		if (frag_strt > frag_end) {
6884			/* this one is malformed, skip */
6885			frag++;
6886			continue;
6887		}
6888		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6889		    MAX_TSN))
6890			*biggest_tsn_acked = frag_end + last_tsn;
6891
6892		/* mark acked dgs and find out the highestTSN being acked */
6893		if (tp1 == NULL) {
6894			tp1 = TAILQ_FIRST(&asoc->sent_queue);
6895
6896			/* save the locations of the last frags */
6897			last_frag_high = frag_end + last_tsn;
6898		} else {
6899			/*
6900			 * now lets see if we need to reset the queue due to
6901			 * a out-of-order SACK fragment
6902			 */
6903			if (compare_with_wrap(frag_strt + last_tsn,
6904			    last_frag_high, MAX_TSN)) {
6905				/*
6906				 * if the new frag starts after the last TSN
6907				 * frag covered, we are ok and this one is
6908				 * beyond the last one
6909				 */
6910				;
6911			} else {
6912				/*
6913				 * ok, they have reset us, so we need to
6914				 * reset the queue this will cause extra
6915				 * hunting but hey, they chose the
6916				 * performance hit when they failed to order
6917				 * there gaps..
6918				 */
6919				tp1 = TAILQ_FIRST(&asoc->sent_queue);
6920			}
6921			last_frag_high = frag_end + last_tsn;
6922		}
6923		for (j = frag_strt; j <= frag_end; j++) {
6924			theTSN = j + last_tsn;
6925			while (tp1) {
6926				if (tp1->rec.data.doing_fast_retransmit)
6927					num_frs++;
6928
6929				/*
6930				 * CMT: CUCv2 algorithm. For each TSN being
6931				 * processed from the sent queue, track the
6932				 * next expected pseudo-cumack, or
6933				 * rtx_pseudo_cumack, if required. Separate
6934				 * cumack trackers for first transmissions,
6935				 * and retransmissions.
6936				 */
6937				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6938				    (tp1->snd_count == 1)) {
6939					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
6940					tp1->whoTo->find_pseudo_cumack = 0;
6941				}
6942				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6943				    (tp1->snd_count > 1)) {
6944					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
6945					tp1->whoTo->find_rtx_pseudo_cumack = 0;
6946				}
6947				if (tp1->rec.data.TSN_seq == theTSN) {
6948					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
6949						/*
6950						 * must be held until
6951						 * cum-ack passes
6952						 */
6953						/*
6954						 * ECN Nonce: Add the nonce
6955						 * value to the sender's
6956						 * nonce sum
6957						 */
6958						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6959							/*-
6960							 * If it is less than RESEND, it is
6961							 * now no-longer in flight.
6962							 * Higher values may already be set
6963							 * via previous Gap Ack Blocks...
6964							 * i.e. ACKED or RESEND.
6965							 */
6966							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6967							    *biggest_newly_acked_tsn, MAX_TSN)) {
6968								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
6969							}
6970							/*
6971							 * CMT: SFR algo
6972							 * (and HTNA) - set
6973							 * saw_newack to 1
6974							 * for dest being
6975							 * newly acked.
6976							 * update
6977							 * this_sack_highest_
6978							 * newack if
6979							 * appropriate.
6980							 */
6981							if (tp1->rec.data.chunk_was_revoked == 0)
6982								tp1->whoTo->saw_newack = 1;
6983
6984							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6985							    tp1->whoTo->this_sack_highest_newack,
6986							    MAX_TSN)) {
6987								tp1->whoTo->this_sack_highest_newack =
6988								    tp1->rec.data.TSN_seq;
6989							}
6990							/*
6991							 * CMT DAC algo:
6992							 * also update
6993							 * this_sack_lowest_n
6994							 * ewack
6995							 */
6996							if (*this_sack_lowest_newack == 0) {
6997								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6998									sctp_log_sack(*this_sack_lowest_newack,
6999									    last_tsn,
7000									    tp1->rec.data.TSN_seq,
7001									    0,
7002									    0,
7003									    SCTP_LOG_TSN_ACKED);
7004								}
7005								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7006							}
7007							/*
7008							 * CMT: CUCv2
7009							 * algorithm. If
7010							 * (rtx-)pseudo-cumac
7011							 * k for corresp
7012							 * dest is being
7013							 * acked, then we
7014							 * have a new
7015							 * (rtx-)pseudo-cumac
7016							 * k. Set
7017							 * new_(rtx_)pseudo_c
7018							 * umack to TRUE so
7019							 * that the cwnd for
7020							 * this dest can be
7021							 * updated. Also
7022							 * trigger search
7023							 * for the next
7024							 * expected
7025							 * (rtx-)pseudo-cumac
7026							 * k. Separate
7027							 * pseudo_cumack
7028							 * trackers for
7029							 * first
7030							 * transmissions and
7031							 * retransmissions.
7032							 */
7033							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
7034								if (tp1->rec.data.chunk_was_revoked == 0) {
7035									tp1->whoTo->new_pseudo_cumack = 1;
7036								}
7037								tp1->whoTo->find_pseudo_cumack = 1;
7038							}
7039							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7040								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7041							}
7042							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
7043								if (tp1->rec.data.chunk_was_revoked == 0) {
7044									tp1->whoTo->new_pseudo_cumack = 1;
7045								}
7046								tp1->whoTo->find_rtx_pseudo_cumack = 1;
7047							}
7048							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7049								sctp_log_sack(*biggest_newly_acked_tsn,
7050								    last_tsn,
7051								    tp1->rec.data.TSN_seq,
7052								    frag_strt,
7053								    frag_end,
7054								    SCTP_LOG_TSN_ACKED);
7055							}
7056							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7057								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
7058								    tp1->whoTo->flight_size,
7059								    tp1->book_size,
7060								    (uintptr_t) tp1->whoTo,
7061								    tp1->rec.data.TSN_seq);
7062							}
7063							sctp_flight_size_decrease(tp1);
7064							sctp_total_flight_decrease(stcb, tp1);
7065
7066							tp1->whoTo->net_ack += tp1->send_size;
7067							if (tp1->snd_count < 2) {
7068								/*
7069								 * True
7070								 * non-retran
7071								 * smited
7072								 * chunk
7073								 */
7074								tp1->whoTo->net_ack2 += tp1->send_size;
7075
7076								/*
7077								 * update
7078								 * RTO too ?
7079								 */
7080								if (tp1->do_rtt) {
7081									tp1->whoTo->RTO =
7082									    sctp_calculate_rto(stcb,
7083									    asoc,
7084									    tp1->whoTo,
7085									    &tp1->sent_rcv_time,
7086									    sctp_align_safe_nocopy);
7087									tp1->do_rtt = 0;
7088								}
7089							}
7090						}
7091						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
7092							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
7093							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
7094							if (compare_with_wrap(tp1->rec.data.TSN_seq,
7095							    asoc->this_sack_highest_gap,
7096							    MAX_TSN)) {
7097								asoc->this_sack_highest_gap =
7098								    tp1->rec.data.TSN_seq;
7099							}
7100							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7101								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7102#ifdef SCTP_AUDITING_ENABLED
7103								sctp_audit_log(0xB2,
7104								    (asoc->sent_queue_retran_cnt & 0x000000ff));
7105#endif
7106							}
7107						}
7108						/*
7109						 * All chunks NOT UNSENT
7110						 * fall through here and are
7111						 * marked
7112						 */
7113						if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7114							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7115						if (tp1->rec.data.chunk_was_revoked) {
7116							/* deflate the cwnd */
7117							tp1->whoTo->cwnd -= tp1->book_size;
7118							tp1->rec.data.chunk_was_revoked = 0;
7119						}
7120						/*
7121						 * EY - if all bit is set
7122						 * then this TSN is
7123						 * nr_marked
7124						 */
7125						if (all_bit) {
7126							if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7127								tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7128							/*
7129							 * TAILQ_REMOVE(&asoc
7130							 * ->sent_queue,
7131							 * tp1, sctp_next);
7132							 */
7133							if (tp1->data) {
7134								/*
7135								 * sa_ignore
7136								 * NO_NULL_CH
7137								 * K
7138								 */
7139								sctp_free_bufspace(stcb, asoc, tp1, 1);
7140								sctp_m_freem(tp1->data);
7141							}
7142							tp1->data = NULL;
7143							/*
7144							 * asoc->sent_queue_c
7145							 * nt--;
7146							 */
7147							/*
7148							 * sctp_free_a_chunk(
7149							 * stcb, tp1);
7150							 */
7151							wake_him++;
7152						}
7153					}
7154					break;
7155				}	/* if (tp1->TSN_seq == theTSN) */
7156				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
7157				    MAX_TSN))
7158					break;
7159
7160				tp1 = TAILQ_NEXT(tp1, sctp_next);
7161			}	/* end while (tp1) */
7162		}		/* end for (j = fragStart */
7163		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
7164		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
7165		*offset += sizeof(block);
7166		if (frag == NULL) {
7167			break;
7168		}
7169	}
7170
7171	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
7172		if (num_frs)
7173			sctp_log_fr(*biggest_tsn_acked,
7174			    *biggest_newly_acked_tsn,
7175			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
7176	}
7177	/*
7178	 * EY - if all bit is not set then there should be other loops to
7179	 * identify nr TSNs
7180	 */
7181	if (!all_bit) {
7182
7183		nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7184		    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7185		*offset += sizeof(nr_block);
7186
7187
7188
7189		if (nr_frag == NULL) {
7190			return;
7191		}
7192		tp1 = NULL;
7193		last_nr_frag_high = 0;
7194
7195		for (i = 0; i < num_nr_seg; i++) {
7196
7197			nr_frag_strt = ntohs(nr_frag->start);
7198			nr_frag_end = ntohs(nr_frag->end);
7199
7200			/* some sanity checks on the nr fargment offsets */
7201			if (nr_frag_strt > nr_frag_end) {
7202				/* this one is malformed, skip */
7203				nr_frag++;
7204				continue;
7205			}
7206			/*
7207			 * mark acked dgs and find out the highestTSN being
7208			 * acked
7209			 */
7210			if (tp1 == NULL) {
7211				tp1 = TAILQ_FIRST(&asoc->sent_queue);
7212
7213				/* save the locations of the last frags */
7214				last_nr_frag_high = nr_frag_end + last_tsn;
7215			} else {
7216				/*
7217				 * now lets see if we need to reset the
7218				 * queue due to a out-of-order SACK fragment
7219				 */
7220				if (compare_with_wrap(nr_frag_strt + last_tsn,
7221				    last_nr_frag_high, MAX_TSN)) {
7222					/*
7223					 * if the new frag starts after the
7224					 * last TSN frag covered, we are ok
7225					 * and this one is beyond the last
7226					 * one
7227					 */
7228					;
7229				} else {
7230					/*
7231					 * ok, they have reset us, so we
7232					 * need to reset the queue this will
7233					 * cause extra hunting but hey, they
7234					 * chose the performance hit when
7235					 * they failed to order there gaps..
7236					 */
7237					tp1 = TAILQ_FIRST(&asoc->sent_queue);
7238				}
7239				last_nr_frag_high = nr_frag_end + last_tsn;
7240			}
7241
7242			for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
7243				while (tp1) {
7244					if (tp1->rec.data.TSN_seq == j) {
7245						if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7246							if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7247								tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7248							/*
7249							 * TAILQ_REMOVE(&asoc
7250							 * ->sent_queue,
7251							 * tp1, sctp_next);
7252							 */
7253							if (tp1->data) {
7254								/*
7255								 * sa_ignore
7256								 * NO_NULL_CH
7257								 * K
7258								 */
7259								sctp_free_bufspace(stcb, asoc, tp1, 1);
7260								sctp_m_freem(tp1->data);
7261							}
7262							tp1->data = NULL;
7263							/*
7264							 * asoc->sent_queue_c
7265							 * nt--;
7266							 */
7267							/*
7268							 * sctp_free_a_chunk(
7269							 * stcb, tp1);
7270							 */
7271							wake_him++;
7272						}
7273						break;
7274					}	/* if (tp1->TSN_seq == j) */
7275					if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
7276					    MAX_TSN))
7277						break;
7278					tp1 = TAILQ_NEXT(tp1, sctp_next);
7279				}	/* end while (tp1) */
7280
7281			}	/* end for (j = nrFragStart */
7282
7283			nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7284			    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7285			*offset += sizeof(nr_block);
7286			if (nr_frag == NULL) {
7287				break;
7288			}
7289		}		/* end of if(!all_bit) */
7290	}
7291	/*
7292	 * EY- wake up the socket if things have been removed from the sent
7293	 * queue
7294	 */
7295	if ((wake_him) && (stcb->sctp_socket)) {
7296#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7297		struct socket *so;
7298
7299#endif
7300		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7301		/*
7302		 * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
7303		 * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
7304		 * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
7305		 */
7306#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7307		so = SCTP_INP_SO(stcb->sctp_ep);
7308		atomic_add_int(&stcb->asoc.refcnt, 1);
7309		SCTP_TCB_UNLOCK(stcb);
7310		SCTP_SOCKET_LOCK(so, 1);
7311		SCTP_TCB_LOCK(stcb);
7312		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7313		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7314			/* assoc was freed while we were unlocked */
7315			SCTP_SOCKET_UNLOCK(so, 1);
7316			return;
7317		}
7318#endif
7319		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7320#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7321		SCTP_SOCKET_UNLOCK(so, 1);
7322#endif
7323	}			/* else { if
7324				 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7325				 * SCTP_WAKE_LOGGING_ENABLE) {
7326				 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7327				 * SCTP_NOWAKE_FROM_SACK); } } */
7328}
7329
7330/* EY- nr_sack */
7331/* Identifies the non-renegable tsns that are revoked*/
7332static void
7333sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7334    struct sctp_association *asoc, uint32_t cumack,
7335    u_long biggest_tsn_acked)
7336{
7337	struct sctp_tmit_chunk *tp1;
7338
7339	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7340	while (tp1) {
7341		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7342		    MAX_TSN)) {
7343			/*
7344			 * ok this guy is either ACK or MARKED. If it is
7345			 * ACKED it has been previously acked but not this
7346			 * time i.e. revoked.  If it is MARKED it was ACK'ed
7347			 * again.
7348			 */
7349			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
7350			    MAX_TSN))
7351				break;
7352
7353
7354			if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7355				/*
7356				 * EY! a non-renegable TSN is revoked, need
7357				 * to abort the association
7358				 */
7359				/*
7360				 * EY TODO: put in the code to abort the
7361				 * assoc.
7362				 */
7363				return;
7364			} else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7365				/* it has been re-acked in this SACK */
7366				tp1->sent = SCTP_DATAGRAM_NR_ACKED;
7367			}
7368		}
7369		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
7370			break;
7371		tp1 = TAILQ_NEXT(tp1, sctp_next);
7372	}
7373}
7374
7375/* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method*/
7376void
7377sctp_handle_nr_sack(struct mbuf *m, int offset,
7378    struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7379    struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7380{
7381	struct sctp_association *asoc;
7382
7383	/* EY sack */
7384	struct sctp_nr_sack *nr_sack;
7385	struct sctp_tmit_chunk *tp1, *tp2;
7386	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7387	         this_sack_lowest_newack;
7388	uint32_t sav_cum_ack;
7389
7390	/* EY num_seg */
7391	uint16_t num_seg, num_nr_seg, num_dup;
7392	uint16_t wake_him = 0;
7393	unsigned int nr_sack_length;
7394	uint32_t send_s = 0;
7395	long j;
7396	int accum_moved = 0;
7397	int will_exit_fast_recovery = 0;
7398	uint32_t a_rwnd, old_rwnd;
7399	int win_probe_recovery = 0;
7400	int win_probe_recovered = 0;
7401	struct sctp_nets *net = NULL;
7402	int nonce_sum_flag, ecn_seg_sums = 0, all_bit;
7403	int done_once;
7404	uint8_t reneged_all = 0;
7405	uint8_t cmt_dac_flag;
7406
7407	/*
7408	 * we take any chance we can to service our queues since we cannot
7409	 * get awoken when the socket is read from :<
7410	 */
7411	/*
7412	 * Now perform the actual SACK handling: 1) Verify that it is not an
7413	 * old sack, if so discard. 2) If there is nothing left in the send
7414	 * queue (cum-ack is equal to last acked) then you have a duplicate
7415	 * too, update any rwnd change and verify no timers are running.
7416	 * then return. 3) Process any new consequtive data i.e. cum-ack
7417	 * moved process these first and note that it moved. 4) Process any
7418	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
7419	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
7420	 * sync up flightsizes and things, stop all timers and also check
7421	 * for shutdown_pending state. If so then go ahead and send off the
7422	 * shutdown. If in shutdown recv, send off the shutdown-ack and
7423	 * start that timer, Ret. 9) Strike any non-acked things and do FR
7424	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
7425	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
7426	 * if in shutdown_recv state.
7427	 */
7428	SCTP_TCB_LOCK_ASSERT(stcb);
7429	nr_sack = &ch->nr_sack;
7430	/* CMT DAC algo */
7431	this_sack_lowest_newack = 0;
7432	j = 0;
7433	nr_sack_length = (unsigned int)nr_sack_len;
7434	/* ECN Nonce */
7435	SCTP_STAT_INCR(sctps_slowpath_sack);
7436	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7437	cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7438#ifdef SCTP_ASOCLOG_OF_TSNS
7439	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7440	stcb->asoc.cumack_log_at++;
7441	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
7442		stcb->asoc.cumack_log_at = 0;
7443	}
7444#endif
7445	all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
7446	num_seg = ntohs(nr_sack->num_gap_ack_blks);
7447	num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
7448	if (all_bit)
7449		num_seg = num_nr_seg;
7450	a_rwnd = rwnd;
7451
7452	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7453		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7454		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7455	}
7456	/* CMT DAC algo */
7457	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7458	num_dup = ntohs(nr_sack->num_dup_tsns);
7459
7460	old_rwnd = stcb->asoc.peers_rwnd;
7461	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7462		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7463		    stcb->asoc.overall_error_count,
7464		    0,
7465		    SCTP_FROM_SCTP_INDATA,
7466		    __LINE__);
7467	}
7468	stcb->asoc.overall_error_count = 0;
7469	asoc = &stcb->asoc;
7470	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7471		sctp_log_sack(asoc->last_acked_seq,
7472		    cum_ack,
7473		    0,
7474		    num_seg,
7475		    num_dup,
7476		    SCTP_LOG_NEW_SACK);
7477	}
7478	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7479		int off_to_dup, iii;
7480		uint32_t *dupdata, dblock;
7481
7482		/* EY! gotta be careful here */
7483		if (all_bit) {
7484			off_to_dup = (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) +
7485			    sizeof(struct sctp_nr_sack_chunk);
7486		} else {
7487			off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7488			    (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7489		}
7490		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7491			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7492			    sizeof(uint32_t), (uint8_t *) & dblock);
7493			off_to_dup += sizeof(uint32_t);
7494			if (dupdata) {
7495				for (iii = 0; iii < num_dup; iii++) {
7496					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7497					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7498					    sizeof(uint32_t), (uint8_t *) & dblock);
7499					if (dupdata == NULL)
7500						break;
7501					off_to_dup += sizeof(uint32_t);
7502				}
7503			}
7504		} else {
7505			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7506			    off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7507		}
7508	}
7509	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7510		/* reality check */
7511		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7512			tp1 = TAILQ_LAST(&asoc->sent_queue,
7513			    sctpchunk_listhead);
7514			send_s = tp1->rec.data.TSN_seq + 1;
7515		} else {
7516			send_s = asoc->sending_seq;
7517		}
7518		if (cum_ack == send_s ||
7519		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7520#ifndef INVARIANTS
7521			struct mbuf *oper;
7522
7523#endif
7524#ifdef INVARIANTS
7525	hopeless_peer:
7526			panic("Impossible sack 1");
7527#else
7528
7529
7530			/*
7531			 * no way, we have not even sent this TSN out yet.
7532			 * Peer is hopelessly messed up with us.
7533			 */
7534	hopeless_peer:
7535			*abort_now = 1;
7536			/* XXX */
7537			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7538			    0, M_DONTWAIT, 1, MT_DATA);
7539			if (oper) {
7540				struct sctp_paramhdr *ph;
7541				uint32_t *ippp;
7542
7543				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7544				    sizeof(uint32_t);
7545				ph = mtod(oper, struct sctp_paramhdr *);
7546				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7547				ph->param_length = htons(SCTP_BUF_LEN(oper));
7548				ippp = (uint32_t *) (ph + 1);
7549				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7550			}
7551			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7552			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7553			return;
7554#endif
7555		}
7556	}
7557	/**********************/
7558	/* 1) check the range */
7559	/**********************/
7560	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7561		/* acking something behind */
7562		return;
7563	}
7564	sav_cum_ack = asoc->last_acked_seq;
7565
7566	/* update the Rwnd of the peer */
7567	if (TAILQ_EMPTY(&asoc->sent_queue) &&
7568	    TAILQ_EMPTY(&asoc->send_queue) &&
7569	    (asoc->stream_queue_cnt == 0)
7570	    ) {
7571		/* nothing left on send/sent and strmq */
7572		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7573			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7574			    asoc->peers_rwnd, 0, 0, a_rwnd);
7575		}
7576		asoc->peers_rwnd = a_rwnd;
7577		if (asoc->sent_queue_retran_cnt) {
7578			asoc->sent_queue_retran_cnt = 0;
7579		}
7580		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7581			/* SWS sender side engages */
7582			asoc->peers_rwnd = 0;
7583		}
7584		/* stop any timers */
7585		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7586			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7587			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7588			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7589				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7590					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7591					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7592					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7593				}
7594			}
7595			net->partial_bytes_acked = 0;
7596			net->flight_size = 0;
7597		}
7598		asoc->total_flight = 0;
7599		asoc->total_flight_count = 0;
7600		return;
7601	}
7602	/*
7603	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
7604	 * things. The total byte count acked is tracked in netAckSz AND
7605	 * netAck2 is used to track the total bytes acked that are un-
7606	 * amibguious and were never retransmitted. We track these on a per
7607	 * destination address basis.
7608	 */
7609	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7610		net->prev_cwnd = net->cwnd;
7611		net->net_ack = 0;
7612		net->net_ack2 = 0;
7613
7614		/*
7615		 * CMT: Reset CUC and Fast recovery algo variables before
7616		 * SACK processing
7617		 */
7618		net->new_pseudo_cumack = 0;
7619		net->will_exit_fast_recovery = 0;
7620	}
7621	/* process the new consecutive TSN first */
7622	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7623	while (tp1) {
7624		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7625		    MAX_TSN) ||
7626		    last_tsn == tp1->rec.data.TSN_seq) {
7627			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7628				/*
7629				 * ECN Nonce: Add the nonce to the sender's
7630				 * nonce sum
7631				 */
7632				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7633				accum_moved = 1;
7634				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7635					/*
7636					 * If it is less than ACKED, it is
7637					 * now no-longer in flight. Higher
7638					 * values may occur during marking
7639					 */
7640					if ((tp1->whoTo->dest_state &
7641					    SCTP_ADDR_UNCONFIRMED) &&
7642					    (tp1->snd_count < 2)) {
7643						/*
7644						 * If there was no retran
7645						 * and the address is
7646						 * un-confirmed and we sent
7647						 * there and are now
7648						 * sacked.. its confirmed,
7649						 * mark it so.
7650						 */
7651						tp1->whoTo->dest_state &=
7652						    ~SCTP_ADDR_UNCONFIRMED;
7653					}
7654					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7655						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7656							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7657							    tp1->whoTo->flight_size,
7658							    tp1->book_size,
7659							    (uintptr_t) tp1->whoTo,
7660							    tp1->rec.data.TSN_seq);
7661						}
7662						sctp_flight_size_decrease(tp1);
7663						sctp_total_flight_decrease(stcb, tp1);
7664					}
7665					tp1->whoTo->net_ack += tp1->send_size;
7666
7667					/* CMT SFR and DAC algos */
7668					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7669					tp1->whoTo->saw_newack = 1;
7670
7671					if (tp1->snd_count < 2) {
7672						/*
7673						 * True non-retransmited
7674						 * chunk
7675						 */
7676						tp1->whoTo->net_ack2 +=
7677						    tp1->send_size;
7678
7679						/* update RTO too? */
7680						if (tp1->do_rtt) {
7681							tp1->whoTo->RTO =
7682							    sctp_calculate_rto(stcb,
7683							    asoc, tp1->whoTo,
7684							    &tp1->sent_rcv_time,
7685							    sctp_align_safe_nocopy);
7686							tp1->do_rtt = 0;
7687						}
7688					}
7689					/*
7690					 * CMT: CUCv2 algorithm. From the
7691					 * cumack'd TSNs, for each TSN being
7692					 * acked for the first time, set the
7693					 * following variables for the
7694					 * corresp destination.
7695					 * new_pseudo_cumack will trigger a
7696					 * cwnd update.
7697					 * find_(rtx_)pseudo_cumack will
7698					 * trigger search for the next
7699					 * expected (rtx-)pseudo-cumack.
7700					 */
7701					tp1->whoTo->new_pseudo_cumack = 1;
7702					tp1->whoTo->find_pseudo_cumack = 1;
7703					tp1->whoTo->find_rtx_pseudo_cumack = 1;
7704
7705
7706					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7707						sctp_log_sack(asoc->last_acked_seq,
7708						    cum_ack,
7709						    tp1->rec.data.TSN_seq,
7710						    0,
7711						    0,
7712						    SCTP_LOG_TSN_ACKED);
7713					}
7714					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7715						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7716					}
7717				}
7718				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7719					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7720#ifdef SCTP_AUDITING_ENABLED
7721					sctp_audit_log(0xB3,
7722					    (asoc->sent_queue_retran_cnt & 0x000000ff));
7723#endif
7724				}
7725				if (tp1->rec.data.chunk_was_revoked) {
7726					/* deflate the cwnd */
7727					tp1->whoTo->cwnd -= tp1->book_size;
7728					tp1->rec.data.chunk_was_revoked = 0;
7729				}
7730				tp1->sent = SCTP_DATAGRAM_ACKED;
7731			}
7732		} else {
7733			break;
7734		}
7735		tp1 = TAILQ_NEXT(tp1, sctp_next);
7736	}
7737	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7738	/* always set this up to cum-ack */
7739	asoc->this_sack_highest_gap = last_tsn;
7740
7741	/* Move offset up to point to gaps/dups */
7742	offset += sizeof(struct sctp_nr_sack_chunk);
7743	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7744
7745		/* skip corrupt segments */
7746		goto skip_segments;
7747	}
7748	if (num_seg > 0) {
7749
7750		/*
7751		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7752		 * to be greater than the cumack. Also reset saw_newack to 0
7753		 * for all dests.
7754		 */
7755		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7756			net->saw_newack = 0;
7757			net->this_sack_highest_newack = last_tsn;
7758		}
7759
7760		/*
7761		 * thisSackHighestGap will increase while handling NEW
7762		 * segments this_sack_highest_newack will increase while
7763		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7764		 * used for CMT DAC algo. saw_newack will also change.
7765		 */
7766
7767		sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7768		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7769		    num_seg, num_nr_seg, &ecn_seg_sums);
7770
7771
7772		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7773			/*
7774			 * validate the biggest_tsn_acked in the gap acks if
7775			 * strict adherence is wanted.
7776			 */
7777			if ((biggest_tsn_acked == send_s) ||
7778			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7779				/*
7780				 * peer is either confused or we are under
7781				 * attack. We must abort.
7782				 */
7783				goto hopeless_peer;
7784			}
7785		}
7786	}
7787skip_segments:
7788	/*******************************************/
7789	/* cancel ALL T3-send timer if accum moved */
7790	/*******************************************/
7791	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7792		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7793			if (net->new_pseudo_cumack)
7794				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7795				    stcb, net,
7796				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7797
7798		}
7799	} else {
7800		if (accum_moved) {
7801			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7802				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7803				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7804			}
7805		}
7806	}
7807	/********************************************/
7808	/* drop the acked chunks from the sendqueue */
7809	/********************************************/
7810	asoc->last_acked_seq = cum_ack;
7811
7812	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7813	if (tp1 == NULL)
7814		goto done_with_it;
7815	do {
7816		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7817		    MAX_TSN)) {
7818			break;
7819		}
7820		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7821			/* no more sent on list */
7822			printf("Warning, tp1->sent == %d and its now acked?\n",
7823			    tp1->sent);
7824		}
7825		tp2 = TAILQ_NEXT(tp1, sctp_next);
7826		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7827		if (tp1->pr_sctp_on) {
7828			if (asoc->pr_sctp_cnt != 0)
7829				asoc->pr_sctp_cnt--;
7830		}
7831		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7832		    (asoc->total_flight > 0)) {
7833#ifdef INVARIANTS
7834			panic("Warning flight size is postive and should be 0");
7835#else
7836			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
7837			    asoc->total_flight);
7838#endif
7839			asoc->total_flight = 0;
7840		}
7841		if (tp1->data) {
7842			/* sa_ignore NO_NULL_CHK */
7843			sctp_free_bufspace(stcb, asoc, tp1, 1);
7844			sctp_m_freem(tp1->data);
7845			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
7846				asoc->sent_queue_cnt_removeable--;
7847			}
7848		}
7849		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7850			sctp_log_sack(asoc->last_acked_seq,
7851			    cum_ack,
7852			    tp1->rec.data.TSN_seq,
7853			    0,
7854			    0,
7855			    SCTP_LOG_FREE_SENT);
7856		}
7857		tp1->data = NULL;
7858		asoc->sent_queue_cnt--;
7859		sctp_free_a_chunk(stcb, tp1);
7860		wake_him++;
7861		tp1 = tp2;
7862	} while (tp1 != NULL);
7863
7864done_with_it:
7865	/* sa_ignore NO_NULL_CHK */
7866	if ((wake_him) && (stcb->sctp_socket)) {
7867#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7868		struct socket *so;
7869
7870#endif
7871		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7872		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7873			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7874		}
7875#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7876		so = SCTP_INP_SO(stcb->sctp_ep);
7877		atomic_add_int(&stcb->asoc.refcnt, 1);
7878		SCTP_TCB_UNLOCK(stcb);
7879		SCTP_SOCKET_LOCK(so, 1);
7880		SCTP_TCB_LOCK(stcb);
7881		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7882		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7883			/* assoc was freed while we were unlocked */
7884			SCTP_SOCKET_UNLOCK(so, 1);
7885			return;
7886		}
7887#endif
7888		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7889#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7890		SCTP_SOCKET_UNLOCK(so, 1);
7891#endif
7892	} else {
7893		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7894			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7895		}
7896	}
7897
7898	if (asoc->fast_retran_loss_recovery && accum_moved) {
7899		if (compare_with_wrap(asoc->last_acked_seq,
7900		    asoc->fast_recovery_tsn, MAX_TSN) ||
7901		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7902			/* Setup so we will exit RFC2582 fast recovery */
7903			will_exit_fast_recovery = 1;
7904		}
7905	}
7906	/*
7907	 * Check for revoked fragments:
7908	 *
7909	 * if Previous sack - Had no frags then we can't have any revoked if
7910	 * Previous sack - Had frag's then - If we now have frags aka
7911	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
7912	 * some of them. else - The peer revoked all ACKED fragments, since
7913	 * we had some before and now we have NONE.
7914	 */
7915
7916	if (num_seg)
7917		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7918
7919	else if (asoc->saw_sack_with_frags) {
7920		int cnt_revoked = 0;
7921
7922		tp1 = TAILQ_FIRST(&asoc->sent_queue);
7923		if (tp1 != NULL) {
7924			/* Peer revoked all dg's marked or acked */
7925			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7926				/*
7927				 * EY- maybe check only if it is nr_acked
7928				 * nr_marked may not be possible
7929				 */
7930				if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7931				    (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7932					/*
7933					 * EY! - TODO: Something previously
7934					 * nr_gapped is reneged, abort the
7935					 * association
7936					 */
7937					return;
7938				}
7939				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7940				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7941					tp1->sent = SCTP_DATAGRAM_SENT;
7942					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7943						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7944						    tp1->whoTo->flight_size,
7945						    tp1->book_size,
7946						    (uintptr_t) tp1->whoTo,
7947						    tp1->rec.data.TSN_seq);
7948					}
7949					sctp_flight_size_increase(tp1);
7950					sctp_total_flight_increase(stcb, tp1);
7951					tp1->rec.data.chunk_was_revoked = 1;
7952					/*
7953					 * To ensure that this increase in
7954					 * flightsize, which is artificial,
7955					 * does not throttle the sender, we
7956					 * also increase the cwnd
7957					 * artificially.
7958					 */
7959					tp1->whoTo->cwnd += tp1->book_size;
7960					cnt_revoked++;
7961				}
7962			}
7963			if (cnt_revoked) {
7964				reneged_all = 1;
7965			}
7966		}
7967		asoc->saw_sack_with_frags = 0;
7968	}
7969	if (num_seg)
7970		asoc->saw_sack_with_frags = 1;
7971	else
7972		asoc->saw_sack_with_frags = 0;
7973
7974	/* EY! - not sure about if there should be an IF */
7975	if (num_nr_seg)
7976		sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7977	else if (asoc->saw_sack_with_nr_frags) {
7978		/*
7979		 * EY!- TODO: all previously nr_gapped chunks have been
7980		 * reneged abort the association
7981		 */
7982		asoc->saw_sack_with_nr_frags = 0;
7983	}
7984	if (num_nr_seg)
7985		asoc->saw_sack_with_nr_frags = 1;
7986	else
7987		asoc->saw_sack_with_nr_frags = 0;
7988	/* JRS - Use the congestion control given in the CC module */
7989	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7990
7991	if (TAILQ_EMPTY(&asoc->sent_queue)) {
7992		/* nothing left in-flight */
7993		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7994			/* stop all timers */
7995			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7996				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7997					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7998					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7999					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
8000				}
8001			}
8002			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8003			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
8004			net->flight_size = 0;
8005			net->partial_bytes_acked = 0;
8006		}
8007		asoc->total_flight = 0;
8008		asoc->total_flight_count = 0;
8009	}
8010	/**********************************/
8011	/* Now what about shutdown issues */
8012	/**********************************/
8013	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
8014		/* nothing left on sendqueue.. consider done */
8015		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8016			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8017			    asoc->peers_rwnd, 0, 0, a_rwnd);
8018		}
8019		asoc->peers_rwnd = a_rwnd;
8020		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8021			/* SWS sender side engages */
8022			asoc->peers_rwnd = 0;
8023		}
8024		/* clean up */
8025		if ((asoc->stream_queue_cnt == 1) &&
8026		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8027		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
8028		    (asoc->locked_on_sending)
8029		    ) {
8030			struct sctp_stream_queue_pending *sp;
8031
8032			/*
8033			 * I may be in a state where we got all across.. but
8034			 * cannot write more due to a shutdown... we abort
8035			 * since the user did not indicate EOR in this case.
8036			 */
8037			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
8038			    sctp_streamhead);
8039			if ((sp) && (sp->length == 0)) {
8040				asoc->locked_on_sending = NULL;
8041				if (sp->msg_is_complete) {
8042					asoc->stream_queue_cnt--;
8043				} else {
8044					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
8045					asoc->stream_queue_cnt--;
8046				}
8047			}
8048		}
8049		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
8050		    (asoc->stream_queue_cnt == 0)) {
8051			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8052				/* Need to abort here */
8053				struct mbuf *oper;
8054
8055		abort_out_now:
8056				*abort_now = 1;
8057				/* XXX */
8058				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
8059				    0, M_DONTWAIT, 1, MT_DATA);
8060				if (oper) {
8061					struct sctp_paramhdr *ph;
8062					uint32_t *ippp;
8063
8064					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
8065					    sizeof(uint32_t);
8066					ph = mtod(oper, struct sctp_paramhdr *);
8067					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
8068					ph->param_length = htons(SCTP_BUF_LEN(oper));
8069					ippp = (uint32_t *) (ph + 1);
8070					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
8071				}
8072				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
8073				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
8074				return;
8075			} else {
8076				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8077				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
8078					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8079				}
8080				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
8081				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8082				sctp_stop_timers_for_shutdown(stcb);
8083				sctp_send_shutdown(stcb,
8084				    stcb->asoc.primary_destination);
8085				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
8086				    stcb->sctp_ep, stcb, asoc->primary_destination);
8087				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
8088				    stcb->sctp_ep, stcb, asoc->primary_destination);
8089			}
8090			return;
8091		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
8092		    (asoc->stream_queue_cnt == 0)) {
8093			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8094				goto abort_out_now;
8095			}
8096			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8097			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
8098			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8099			sctp_send_shutdown_ack(stcb,
8100			    stcb->asoc.primary_destination);
8101
8102			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
8103			    stcb->sctp_ep, stcb, asoc->primary_destination);
8104			return;
8105		}
8106	}
8107	/*
8108	 * Now here we are going to recycle net_ack for a different use...
8109	 * HEADS UP.
8110	 */
8111	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8112		net->net_ack = 0;
8113	}
8114
8115	/*
8116	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
8117	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
8118	 * automatically ensure that.
8119	 */
8120	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
8121		this_sack_lowest_newack = cum_ack;
8122	}
8123	if (num_seg > 0) {
8124		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
8125		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
8126	}
8127	/* JRS - Use the congestion control given in the CC module */
8128	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
8129
8130	/******************************************************************
8131	 *  Here we do the stuff with ECN Nonce checking.
8132	 *  We basically check to see if the nonce sum flag was incorrect
8133	 *  or if resynchronization needs to be done. Also if we catch a
8134	 *  misbehaving receiver we give him the kick.
8135	 ******************************************************************/
8136
8137	if (asoc->ecn_nonce_allowed) {
8138		if (asoc->nonce_sum_check) {
8139			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
8140				if (asoc->nonce_wait_for_ecne == 0) {
8141					struct sctp_tmit_chunk *lchk;
8142
8143					lchk = TAILQ_FIRST(&asoc->send_queue);
8144					asoc->nonce_wait_for_ecne = 1;
8145					if (lchk) {
8146						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
8147					} else {
8148						asoc->nonce_wait_tsn = asoc->sending_seq;
8149					}
8150				} else {
8151					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
8152					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
8153						/*
8154						 * Misbehaving peer. We need
8155						 * to react to this guy
8156						 */
8157						asoc->ecn_allowed = 0;
8158						asoc->ecn_nonce_allowed = 0;
8159					}
8160				}
8161			}
8162		} else {
8163			/* See if Resynchronization Possible */
8164			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
8165				asoc->nonce_sum_check = 1;
8166				/*
8167				 * now we must calculate what the base is.
8168				 * We do this based on two things, we know
8169				 * the total's for all the segments
8170				 * gap-acked in the SACK, its stored in
8171				 * ecn_seg_sums. We also know the SACK's
8172				 * nonce sum, its in nonce_sum_flag. So we
8173				 * can build a truth table to back-calculate
8174				 * the new value of
8175				 * asoc->nonce_sum_expect_base:
8176				 *
8177				 * SACK-flag-Value         Seg-Sums Base 0 0 0
8178				 * 1                    0 1 0 1 1 1 1 0
8179				 */
8180				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
8181			}
8182		}
8183	}
8184	/* Now are we exiting loss recovery ? */
8185	if (will_exit_fast_recovery) {
8186		/* Ok, we must exit fast recovery */
8187		asoc->fast_retran_loss_recovery = 0;
8188	}
8189	if ((asoc->sat_t3_loss_recovery) &&
8190	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
8191	    MAX_TSN) ||
8192	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
8193		/* end satellite t3 loss recovery */
8194		asoc->sat_t3_loss_recovery = 0;
8195	}
8196	/*
8197	 * CMT Fast recovery
8198	 */
8199	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8200		if (net->will_exit_fast_recovery) {
8201			/* Ok, we must exit fast recovery */
8202			net->fast_retran_loss_recovery = 0;
8203		}
8204	}
8205
8206	/* Adjust and set the new rwnd value */
8207	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8208		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8209		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
8210	}
8211	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
8212	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
8213	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8214		/* SWS sender side engages */
8215		asoc->peers_rwnd = 0;
8216	}
8217	if (asoc->peers_rwnd > old_rwnd) {
8218		win_probe_recovery = 1;
8219	}
8220	/*
8221	 * Now we must setup so we have a timer up for anyone with
8222	 * outstanding data.
8223	 */
8224	done_once = 0;
8225again:
8226	j = 0;
8227	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8228		if (win_probe_recovery && (net->window_probe)) {
8229			win_probe_recovered = 1;
8230			/*-
8231			 * Find first chunk that was used with
8232			 * window probe and clear the event. Put
8233			 * it back into the send queue as if has
8234			 * not been sent.
8235			 */
8236			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8237				if (tp1->window_probe) {
8238					sctp_window_probe_recovery(stcb, asoc, net, tp1);
8239					break;
8240				}
8241			}
8242		}
8243		if (net->flight_size) {
8244			j++;
8245			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8246			    stcb->sctp_ep, stcb, net);
8247			if (net->window_probe) {
8248				net->window_probe = 0;
8249			}
8250		} else {
8251			if (net->window_probe) {
8252				net->window_probe = 0;
8253				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8254				    stcb->sctp_ep, stcb, net);
8255			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8256				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8257				    stcb, net,
8258				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8259			}
8260			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8261				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8262					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8263					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8264					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
8265				}
8266			}
8267		}
8268	}
8269	if ((j == 0) &&
8270	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8271	    (asoc->sent_queue_retran_cnt == 0) &&
8272	    (win_probe_recovered == 0) &&
8273	    (done_once == 0)) {
8274		/*
8275		 * huh, this should not happen unless all packets are
8276		 * PR-SCTP and marked to skip of course.
8277		 */
8278		if (sctp_fs_audit(asoc)) {
8279			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8280				net->flight_size = 0;
8281			}
8282			asoc->total_flight = 0;
8283			asoc->total_flight_count = 0;
8284			asoc->sent_queue_retran_cnt = 0;
8285			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8286				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8287					sctp_flight_size_increase(tp1);
8288					sctp_total_flight_increase(stcb, tp1);
8289				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8290					asoc->sent_queue_retran_cnt++;
8291				}
8292			}
8293		}
8294		done_once = 1;
8295		goto again;
8296	}
8297	/*********************************************/
8298	/* Here we perform PR-SCTP procedures        */
8299	/* (section 4.2)                             */
8300	/*********************************************/
8301	/* C1. update advancedPeerAckPoint */
8302	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8303		asoc->advanced_peer_ack_point = cum_ack;
8304	}
8305	/* C2. try to further move advancedPeerAckPoint ahead */
8306	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8307		struct sctp_tmit_chunk *lchk;
8308		uint32_t old_adv_peer_ack_point;
8309
8310		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8311		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
8312		/* C3. See if we need to send a Fwd-TSN */
8313		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8314		    MAX_TSN)) {
8315			/*
8316			 * ISSUE with ECN, see FWD-TSN processing for notes
8317			 * on issues that will occur when the ECN NONCE
8318			 * stuff is put into SCTP for cross checking.
8319			 */
8320			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8321			    MAX_TSN)) {
8322				send_forward_tsn(stcb, asoc);
8323				/*
8324				 * ECN Nonce: Disable Nonce Sum check when
8325				 * FWD TSN is sent and store resync tsn
8326				 */
8327				asoc->nonce_sum_check = 0;
8328				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8329			} else if (lchk) {
8330				/* try to FR fwd-tsn's that get lost too */
8331				lchk->rec.data.fwd_tsn_cnt++;
8332				if (lchk->rec.data.fwd_tsn_cnt > 3) {
8333					send_forward_tsn(stcb, asoc);
8334					lchk->rec.data.fwd_tsn_cnt = 0;
8335				}
8336			}
8337		}
8338		if (lchk) {
8339			/* Assure a timer is up */
8340			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8341			    stcb->sctp_ep, stcb, lchk->whoTo);
8342		}
8343	}
8344	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8345		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8346		    a_rwnd,
8347		    stcb->asoc.peers_rwnd,
8348		    stcb->asoc.total_flight,
8349		    stcb->asoc.total_output_queue_size);
8350	}
8351}
8352