/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 228653 2011-12-17 19:21:40Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, so the SACK can be bundled with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and we still
	 * hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
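
/*
 * Illustrative sketch (hypothetical numbers, not compiled here) of the
 * arithmetic above: assume a 64 KB receive buffer limit, 8 KB held on the
 * reassembly queue in 4 chunks, 4 KB held on the stream queues in 2 chunks,
 * and an MSIZE of 256 bytes of mbuf overhead charged per held chunk:
 *
 *	calc = 65536;
 *	calc -= (8192 + 4 * 256);	(reasm data plus per-chunk overhead)
 *	calc -= (4096 + 2 * 256);	(stream data plus per-chunk overhead)
 *
 * leaving calc == 51712. my_rwnd_control_len is then subtracted, and if
 * what remains is smaller than my_rwnd_control_len the window is forced to
 * 1 rather than 0, which is the SWS avoidance noted above.
 */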



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
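
/*
 * Sketch of how a caller typically uses the constructor above (hypothetical
 * values, not compiled here): build the entry from a received DATA chunk
 * and hand it to the socket read queue, remembering that the routine
 * already takes a reference on 'net' and returns NULL on allocation
 * failure:
 *
 *	control = sctp_build_readq_entry(stcb, net, tsn, ppid, context,
 *	    sid, ssn, flags, m);
 *	if (control == NULL)
 *		return;		(nothing was consumed; drop or retry)
 *	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
 *	    &stcb->sctp_socket->so_rcv, 1,
 *	    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
 */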


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		/* reserve space for the nxtinfo cmsg appended below */
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
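
/*
 * For reference, a userland sketch (hypothetical, not part of this file) of
 * how the ancillary data built above is consumed with recvmsg(2) and the
 * standard CMSG macros:
 *
 *	char buf[2048], cbuf[1024];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	struct sctp_rcvinfo *ri;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	if (recvmsg(fd, &msg, 0) < 0)
 *		err(1, "recvmsg");
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_RCVINFO) {
 *			ri = (struct sctp_rcvinfo *)CMSG_DATA(cmsg);
 *			printf("sid %u ssn %u tsn %u\n",
 *			    ri->rcv_sid, ri->rcv_ssn, ri->rcv_tsn);
 *		}
 *	}
 */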


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
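
/*
 * Worked example (hypothetical values) of the gap computation used above.
 * SCTP_CALC_TSN_TO_GAP() yields the bit offset of a TSN relative to
 * mapping_array_base_tsn, modulo the 32-bit serial space. With
 *
 *	mapping_array_base_tsn = 0xfffffffe	(near the wrap)
 *	tsn                    = 0x00000001
 *
 * the gap is 0x00000001 - 0xfffffffe = 3 in serial arithmetic, so the TSN
 * occupies bit 3 of the map even though a plain unsigned compare would call
 * it "smaller" than the base. Moving a bit from mapping_array to
 * nr_mapping_array marks that TSN as delivered and therefore non-renegable.
 */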


/*
 * We are currently delivering from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going... */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * AND not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue, this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * sequence number. I guess I will
					 * just free this new guy; should we
					 * abort too? FIX ME MAYBE? Or it
					 * COULD be that the SSNs have
					 * wrapped. Maybe I should compare to
					 * TSN somehow... sigh, for now just
					 * blow away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
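
/*
 * Worked example (hypothetical values) of the 16-bit serial comparison the
 * code above relies on. SCTP_SSN_GT()/SCTP_SSN_GE() compare stream sequence
 * numbers modulo 2^16, so ordering survives the wrap:
 *
 *	last_sequence_delivered = 65535 (0xffff)
 *	incoming sinfo_ssn      = 0
 *
 * A plain "0 > 65535" is false, but in serial arithmetic SSN 0 follows
 * 65535, so SCTP_SSN_GE(65535, 0) is false and the message is NOT treated
 * as a duplicate; it is simply the next in-order SSN after the wrap.
 */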

/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
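
/*
 * Example (hypothetical queue) of the walk above. Suppose the reassembly
 * queue holds fragments with TSNs 10 (FIRST), 11, and 13 (LAST): the loop
 * accumulates t_size for 10 and 11, then sees 13 where it expected 12 and
 * returns 0 with t_size covering only the contiguous prefix. Once TSN 12
 * arrives, the walk reaches the LAST fragment and returns 1; note that the
 * caller below may start partial delivery even on a 0 return if the
 * contiguous prefix alone reaches the partial delivery point.
 */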

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
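
/*
 * Illustrative numbers (hypothetical) for the partial delivery decision
 * above: with a 64 KB receive buffer limit and partial_delivery_point set
 * to 4096, pd_point = min(65536, 4096) = 4096. A fragmented message whose
 * contiguous prefix totals 5000 bytes therefore starts partial delivery
 * even though its LAST fragment has not arrived, while a 2000-byte prefix
 * waits either for more fragments or for sctp_is_all_msg_on_reasm() to
 * report the message complete.
 */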

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it is NOT the same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it is NOT the same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/*
			 * Gak, he sent me a duplicate TSN. I guess I will
			 * just free this new guy; should we abort too? FIX
			 * ME MAYBE? Or it COULD be that the SSNs have
			 * wrapped. Maybe I should compare to TSN somehow...
			 * sigh, for now just blow away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, stream number:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, stream number:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do
 * this, but that is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * the new chunk need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
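
/*
 * Example (hypothetical queue) for the check above. Suppose the reassembly
 * queue holds TSN 10 flagged MIDDLE. An arriving chunk with TSN 11 lands
 * "one past" a fragment that is NOT a LAST, so the routine returns 1 and
 * the caller treats a complete (non-fragment) chunk there as a protocol
 * violation; had TSN 10 been a LAST fragment, a complete message may
 * legally follow it and the routine returns 0. The mirror-image test fires
 * when the new TSN sits immediately before a queued fragment that is not a
 * FIRST.
 */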
1443
1444
1445static int
1446sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1447    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1448    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1449    int *break_flag, int last_chunk)
1450{
1451	/* Process a data chunk */
1452	/* struct sctp_tmit_chunk *chk; */
1453	struct sctp_tmit_chunk *chk;
1454	uint32_t tsn, gap;
1455	struct mbuf *dmbuf;
1456	int the_len;
1457	int need_reasm_check = 0;
1458	uint16_t strmno, strmseq;
1459	struct mbuf *oper;
1460	struct sctp_queued_to_read *control;
1461	int ordered;
1462	uint32_t protocol_id;
1463	uint8_t chunk_flags;
1464	struct sctp_stream_reset_list *liste;
1465
1466	chk = NULL;
1467	tsn = ntohl(ch->dp.tsn);
1468	chunk_flags = ch->ch.chunk_flags;
1469	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1470		asoc->send_sack = 1;
1471	}
1472	protocol_id = ch->dp.protocol_id;
1473	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1474	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1475		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1476	}
1477	if (stcb == NULL) {
1478		return (0);
1479	}
1480	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1481	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1482		/* It is a duplicate */
1483		SCTP_STAT_INCR(sctps_recvdupdata);
1484		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1485			/* Record a dup for the next outbound sack */
1486			asoc->dup_tsns[asoc->numduptsns] = tsn;
1487			asoc->numduptsns++;
1488		}
1489		asoc->send_sack = 1;
1490		return (0);
1491	}
1492	/* Calculate the number of TSN's between the base and this TSN */
1493	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1494	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1495		/* Can't hold the bit in the mapping at max array, toss it */
1496		return (0);
1497	}
1498	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1499		SCTP_TCB_LOCK_ASSERT(stcb);
1500		if (sctp_expand_mapping_array(asoc, gap)) {
1501			/* Can't expand, drop it */
1502			return (0);
1503		}
1504	}
1505	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1506		*high_tsn = tsn;
1507	}
1508	/* See if we have received this one already */
1509	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1510	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1511		SCTP_STAT_INCR(sctps_recvdupdata);
1512		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1513			/* Record a dup for the next outbound sack */
1514			asoc->dup_tsns[asoc->numduptsns] = tsn;
1515			asoc->numduptsns++;
1516		}
1517		asoc->send_sack = 1;
1518		return (0);
1519	}
1520	/*
1521	 * Check to see about the GONE flag, duplicates would cause a sack
1522	 * to be sent up above
1523	 */
1524	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1525	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1526	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1527	    ) {
1528		/*
1529		 * wait a minute, this guy is gone, there is no longer a
1530		 * receiver. Send peer an ABORT!
1531		 */
1532		struct mbuf *op_err;
1533
1534		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1535		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1536		*abort_flag = 1;
1537		return (0);
1538	}
1539	/*
1540	 * Now before going further we see if there is room. If NOT then we
1541	 * MAY let one through only IF this TSN is the one we are waiting
1542	 * for on a partial delivery API.
1543	 */
1544
1545	/* now do the tests */
1546	if (((asoc->cnt_on_all_streams +
1547	    asoc->cnt_on_reasm_queue +
1548	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1549	    (((int)asoc->my_rwnd) <= 0)) {
1550		/*
1551		 * When we have NO room in the rwnd we check to make sure
1552		 * the reader is doing its job...
1553		 */
1554		if (stcb->sctp_socket->so_rcv.sb_cc) {
1555			/* some to read, wake-up */
1556#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1557			struct socket *so;
1558
1559			so = SCTP_INP_SO(stcb->sctp_ep);
1560			atomic_add_int(&stcb->asoc.refcnt, 1);
1561			SCTP_TCB_UNLOCK(stcb);
1562			SCTP_SOCKET_LOCK(so, 1);
1563			SCTP_TCB_LOCK(stcb);
1564			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1565			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1566				/* assoc was freed while we were unlocked */
1567				SCTP_SOCKET_UNLOCK(so, 1);
1568				return (0);
1569			}
1570#endif
1571			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1572#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1573			SCTP_SOCKET_UNLOCK(so, 1);
1574#endif
1575		}
1576		/* now is it in the mapping array of what we have accepted? */
1577		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1578		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1579			/* Nope, not in the valid range; dump it */
1580			sctp_set_rwnd(stcb, asoc);
1581			if ((asoc->cnt_on_all_streams +
1582			    asoc->cnt_on_reasm_queue +
1583			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1584				SCTP_STAT_INCR(sctps_datadropchklmt);
1585			} else {
1586				SCTP_STAT_INCR(sctps_datadroprwnd);
1587			}
1588			*break_flag = 1;
1589			return (0);
1590		}
1591	}
1592	strmno = ntohs(ch->dp.stream_id);
1593	if (strmno >= asoc->streamincnt) {
1594		struct sctp_paramhdr *phdr;
1595		struct mbuf *mb;
1596
1597		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1598		    0, M_DONTWAIT, 1, MT_DATA);
1599		if (mb != NULL) {
1600			/* add some space up front so prepend will work well */
1601			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1602			phdr = mtod(mb, struct sctp_paramhdr *);
1603			/*
1604			 * Error causes are just params, and this one has
1605			 * two back-to-back phdrs: one with the error type
1606			 * and size, the other with the stream id and a rsvd
1607			 */
1608			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1609			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1610			phdr->param_length =
1611			    htons(sizeof(struct sctp_paramhdr) * 2);
1612			phdr++;
1613			/* We insert the stream in the type field */
1614			phdr->param_type = ch->dp.stream_id;
1615			/* And set the length to 0 for the rsvd field */
1616			phdr->param_length = 0;
1617			sctp_queue_op_err(stcb, mb);
1618		}
1619		SCTP_STAT_INCR(sctps_badsid);
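		/*
		 * Added note (hedged): the resulting cause is 8 bytes of
		 * two back-to-back param headers, per RFC 4960 3.3.10.1
		 * (Invalid Stream Identifier, cause code 1):
		 *
		 *   | cause = 1 | length = 8 | stream id | (rsvd) 0 |
		 *
		 * each field being 16 bits; the stream id is kept in the
		 * network byte order in which it arrived.
		 */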
1620		SCTP_TCB_LOCK_ASSERT(stcb);
1621		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1622		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1623			asoc->highest_tsn_inside_nr_map = tsn;
1624		}
1625		if (tsn == (asoc->cumulative_tsn + 1)) {
1626			/* Update cum-ack */
1627			asoc->cumulative_tsn = tsn;
1628		}
1629		return (0);
1630	}
1631	/*
1632	 * Before we continue let's validate that we are not being fooled by
1633	 * an evil attacker. We can only have 4k chunks based on the TSN
1634	 * spread allowed by the mapping array (512 bytes * 8 bits), so there
1635	 * is no way our stream sequence numbers could have wrapped. We of
1636	 * course only validate the FIRST fragment, so the bit must be set.
1637	 */
1638	strmseq = ntohs(ch->dp.stream_sequence);
1639#ifdef SCTP_ASOCLOG_OF_TSNS
1640	SCTP_TCB_LOCK_ASSERT(stcb);
1641	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1642		asoc->tsn_in_at = 0;
1643		asoc->tsn_in_wrapped = 1;
1644	}
1645	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1646	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1647	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1648	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1649	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1650	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1651	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1652	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1653	asoc->tsn_in_at++;
1654#endif
1655	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1656	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1657	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1658	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1659		/* The incoming sseq is behind where we last delivered? */
1660		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1661		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1662		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1663		    0, M_DONTWAIT, 1, MT_DATA);
1664		if (oper) {
1665			struct sctp_paramhdr *ph;
1666			uint32_t *ippp;
1667
1668			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1669			    (3 * sizeof(uint32_t));
1670			ph = mtod(oper, struct sctp_paramhdr *);
1671			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1672			ph->param_length = htons(SCTP_BUF_LEN(oper));
1673			ippp = (uint32_t *) (ph + 1);
1674			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1675			ippp++;
1676			*ippp = tsn;
1677			ippp++;
1678			*ippp = ((strmno << 16) | strmseq);
1679
1680		}
1681		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1682		sctp_abort_an_association(stcb->sctp_ep, stcb,
1683		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1684		*abort_flag = 1;
1685		return (0);
1686	}
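	/*
	 * Added note: the operational error built above is a
	 * PROTOCOL_VIOLATION cause carrying three 32-bit debug words: an
	 * internal location code (SCTP_FROM_SCTP_INDATA + SCTP_LOC_14),
	 * the offending TSN, and (strmno << 16) | strmseq.  The same
	 * layout recurs below for SCTP_LOC_15 through SCTP_LOC_17.
	 */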
1687	/************************************
1688	 * From here down we may find ch-> invalid
1689	 * so it's a good idea NOT to use it.
1690	 *************************************/
1691
1692	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1693	if (last_chunk == 0) {
1694		dmbuf = SCTP_M_COPYM(*m,
1695		    (offset + sizeof(struct sctp_data_chunk)),
1696		    the_len, M_DONTWAIT);
1697#ifdef SCTP_MBUF_LOGGING
1698		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1699			struct mbuf *mat;
1700
1701			mat = dmbuf;
1702			while (mat) {
1703				if (SCTP_BUF_IS_EXTENDED(mat)) {
1704					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1705				}
1706				mat = SCTP_BUF_NEXT(mat);
1707			}
1708		}
1709#endif
1710	} else {
1711		/* We can steal the last chunk */
1712		int l_len;
1713
1714		dmbuf = *m;
1715		/* lop off the top part */
1716		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1717		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1718			l_len = SCTP_BUF_LEN(dmbuf);
1719		} else {
1720			/*
1721			 * need to count up the size; hopefully we do not
1722			 * hit this too often :-0
1723			 */
1724			struct mbuf *lat;
1725
1726			l_len = 0;
1727			lat = dmbuf;
1728			while (lat) {
1729				l_len += SCTP_BUF_LEN(lat);
1730				lat = SCTP_BUF_NEXT(lat);
1731			}
1732		}
1733		if (l_len > the_len) {
1734			/* Trim the padding bytes off the end too */
1735			m_adj(dmbuf, -(l_len - the_len));
1736		}
1737	}
1738	if (dmbuf == NULL) {
1739		SCTP_STAT_INCR(sctps_nomem);
1740		return (0);
1741	}
1742	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1743	    asoc->fragmented_delivery_inprogress == 0 &&
1744	    TAILQ_EMPTY(&asoc->resetHead) &&
1745	    ((ordered == 0) ||
1746	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1747	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1748		/* Candidate for express delivery */
1749		/*
1750		 * It's not fragmented, no PD-API is up, nothing is in the
1751		 * delivery queue, it's un-ordered OR ordered and the next to
1752		 * deliver AND nothing else is stuck on the stream queue,
1753		 * and there is room for it in the socket buffer. Let's just
1754		 * stuff it up the buffer....
1755		 */
1756
1757		/* It would be nice to avoid this copy if we could :< */
1758		sctp_alloc_a_readq(stcb, control);
1759		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1760		    protocol_id,
1761		    stcb->asoc.context,
1762		    strmno, strmseq,
1763		    chunk_flags,
1764		    dmbuf);
1765		if (control == NULL) {
1766			goto failed_express_del;
1767		}
1768		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1769		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1770			asoc->highest_tsn_inside_nr_map = tsn;
1771		}
1772		sctp_add_to_readq(stcb->sctp_ep, stcb,
1773		    control, &stcb->sctp_socket->so_rcv,
1774		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1775
1776		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1777			/* for ordered, bump what we delivered */
1778			asoc->strmin[strmno].last_sequence_delivered++;
1779		}
1780		SCTP_STAT_INCR(sctps_recvexpress);
1781		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1782			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1783			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1784		}
1785		control = NULL;
1786
1787		goto finish_express_del;
1788	}
1789failed_express_del:
1790	/* If we reach here this is a new chunk */
1791	chk = NULL;
1792	control = NULL;
1793	/* Express for fragmented delivery? */
1794	if ((asoc->fragmented_delivery_inprogress) &&
1795	    (stcb->asoc.control_pdapi) &&
1796	    (asoc->str_of_pdapi == strmno) &&
1797	    (asoc->ssn_of_pdapi == strmseq)
1798	    ) {
1799		control = stcb->asoc.control_pdapi;
1800		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1801			/* Can't be another first? */
1802			goto failed_pdapi_express_del;
1803		}
1804		if (tsn == (control->sinfo_tsn + 1)) {
1805			/* Yep, we can add it on */
1806			int end = 0;
1807			uint32_t cumack;
1808
1809			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1810				end = 1;
1811			}
1812			cumack = asoc->cumulative_tsn;
1813			if ((cumack + 1) == tsn)
1814				cumack = tsn;
1815
1816			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1817			    tsn,
1818			    &stcb->sctp_socket->so_rcv)) {
1819				SCTP_PRINTF("Append fails end:%d\n", end);
1820				goto failed_pdapi_express_del;
1821			}
1822			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1823			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1824				asoc->highest_tsn_inside_nr_map = tsn;
1825			}
1826			SCTP_STAT_INCR(sctps_recvexpressm);
1827			control->sinfo_tsn = tsn;
1828			asoc->tsn_last_delivered = tsn;
1829			asoc->fragment_flags = chunk_flags;
1830			asoc->tsn_of_pdapi_last_delivered = tsn;
1831			asoc->last_flags_delivered = chunk_flags;
1832			asoc->last_strm_seq_delivered = strmseq;
1833			asoc->last_strm_no_delivered = strmno;
1834			if (end) {
1835				/* clean up the flags and such */
1836				asoc->fragmented_delivery_inprogress = 0;
1837				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1838					asoc->strmin[strmno].last_sequence_delivered++;
1839				}
1840				stcb->asoc.control_pdapi = NULL;
1841				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1842					/*
1843					 * There could be another message
1844					 * ready
1845					 */
1846					need_reasm_check = 1;
1847				}
1848			}
1849			control = NULL;
1850			goto finish_express_del;
1851		}
1852	}
1853failed_pdapi_express_del:
1854	control = NULL;
1855	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1856		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1858			asoc->highest_tsn_inside_nr_map = tsn;
1859		}
1860	} else {
1861		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1862		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1863			asoc->highest_tsn_inside_map = tsn;
1864		}
1865	}
1866	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1867		sctp_alloc_a_chunk(stcb, chk);
1868		if (chk == NULL) {
1869			/* No memory so we drop the chunk */
1870			SCTP_STAT_INCR(sctps_nomem);
1871			if (last_chunk == 0) {
1872				/* we copied it, free the copy */
1873				sctp_m_freem(dmbuf);
1874			}
1875			return (0);
1876		}
1877		chk->rec.data.TSN_seq = tsn;
1878		chk->no_fr_allowed = 0;
1879		chk->rec.data.stream_seq = strmseq;
1880		chk->rec.data.stream_number = strmno;
1881		chk->rec.data.payloadtype = protocol_id;
1882		chk->rec.data.context = stcb->asoc.context;
1883		chk->rec.data.doing_fast_retransmit = 0;
1884		chk->rec.data.rcv_flags = chunk_flags;
1885		chk->asoc = asoc;
1886		chk->send_size = the_len;
1887		chk->whoTo = net;
1888		atomic_add_int(&net->ref_count, 1);
1889		chk->data = dmbuf;
1890	} else {
1891		sctp_alloc_a_readq(stcb, control);
1892		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1893		    protocol_id,
1894		    stcb->asoc.context,
1895		    strmno, strmseq,
1896		    chunk_flags,
1897		    dmbuf);
1898		if (control == NULL) {
1899			/* No memory so we drop the chunk */
1900			SCTP_STAT_INCR(sctps_nomem);
1901			if (last_chunk == 0) {
1902				/* we copied it, free the copy */
1903				sctp_m_freem(dmbuf);
1904			}
1905			return (0);
1906		}
1907		control->length = the_len;
1908	}
1909
1910	/* Mark it as received */
1911	/* Now queue it where it belongs */
1912	if (control != NULL) {
1913		/* First a sanity check */
1914		if (asoc->fragmented_delivery_inprogress) {
1915			/*
1916			 * Ok, we have a fragmented delivery in progress; if
1917			 * this chunk is next to deliver OR, in our view,
1918			 * belongs on the reassembly queue, the peer is evil
1919			 * or broken.
1920			 */
1921			uint32_t estimate_tsn;
1922
1923			estimate_tsn = asoc->tsn_last_delivered + 1;
1924			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1925			    (estimate_tsn == control->sinfo_tsn)) {
1926				/* Evil/Broken peer */
1927				sctp_m_freem(control->data);
1928				control->data = NULL;
1929				if (control->whoFrom) {
1930					sctp_free_remote_addr(control->whoFrom);
1931					control->whoFrom = NULL;
1932				}
1933				sctp_free_a_readq(stcb, control);
1934				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1935				    0, M_DONTWAIT, 1, MT_DATA);
1936				if (oper) {
1937					struct sctp_paramhdr *ph;
1938					uint32_t *ippp;
1939
1940					SCTP_BUF_LEN(oper) =
1941					    sizeof(struct sctp_paramhdr) +
1942					    (3 * sizeof(uint32_t));
1943					ph = mtod(oper, struct sctp_paramhdr *);
1944					ph->param_type =
1945					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1946					ph->param_length = htons(SCTP_BUF_LEN(oper));
1947					ippp = (uint32_t *) (ph + 1);
1948					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1949					ippp++;
1950					*ippp = tsn;
1951					ippp++;
1952					*ippp = ((strmno << 16) | strmseq);
1953				}
1954				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1955				sctp_abort_an_association(stcb->sctp_ep, stcb,
1956				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1957
1958				*abort_flag = 1;
1959				return (0);
1960			} else {
1961				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1962					sctp_m_freem(control->data);
1963					control->data = NULL;
1964					if (control->whoFrom) {
1965						sctp_free_remote_addr(control->whoFrom);
1966						control->whoFrom = NULL;
1967					}
1968					sctp_free_a_readq(stcb, control);
1969
1970					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1971					    0, M_DONTWAIT, 1, MT_DATA);
1972					if (oper) {
1973						struct sctp_paramhdr *ph;
1974						uint32_t *ippp;
1975
1976						SCTP_BUF_LEN(oper) =
1977						    sizeof(struct sctp_paramhdr) +
1978						    (3 * sizeof(uint32_t));
1979						ph = mtod(oper,
1980						    struct sctp_paramhdr *);
1981						ph->param_type =
1982						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1983						ph->param_length =
1984						    htons(SCTP_BUF_LEN(oper));
1985						ippp = (uint32_t *) (ph + 1);
1986						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1987						ippp++;
1988						*ippp = tsn;
1989						ippp++;
1990						*ippp = ((strmno << 16) | strmseq);
1991					}
1992					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1993					sctp_abort_an_association(stcb->sctp_ep,
1994					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1995
1996					*abort_flag = 1;
1997					return (0);
1998				}
1999			}
2000		} else {
2001			/* No PDAPI running */
2002			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2003				/*
2004				 * Reassembly queue is NOT empty; validate
2005				 * that this tsn does not need to be on the
2006				 * reassembly queue. If it does, then our
2007				 * peer is broken or evil.
2008				 */
2009				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2010					sctp_m_freem(control->data);
2011					control->data = NULL;
2012					if (control->whoFrom) {
2013						sctp_free_remote_addr(control->whoFrom);
2014						control->whoFrom = NULL;
2015					}
2016					sctp_free_a_readq(stcb, control);
2017					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2018					    0, M_DONTWAIT, 1, MT_DATA);
2019					if (oper) {
2020						struct sctp_paramhdr *ph;
2021						uint32_t *ippp;
2022
2023						SCTP_BUF_LEN(oper) =
2024						    sizeof(struct sctp_paramhdr) +
2025						    (3 * sizeof(uint32_t));
2026						ph = mtod(oper,
2027						    struct sctp_paramhdr *);
2028						ph->param_type =
2029						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2030						ph->param_length =
2031						    htons(SCTP_BUF_LEN(oper));
2032						ippp = (uint32_t *) (ph + 1);
2033						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2034						ippp++;
2035						*ippp = tsn;
2036						ippp++;
2037						*ippp = ((strmno << 16) | strmseq);
2038					}
2039					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2040					sctp_abort_an_association(stcb->sctp_ep,
2041					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2042
2043					*abort_flag = 1;
2044					return (0);
2045				}
2046			}
2047		}
2048		/* ok, if we reach here we have passed the sanity checks */
2049		if (chunk_flags & SCTP_DATA_UNORDERED) {
2050			/* queue directly into socket buffer */
2051			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2052			sctp_add_to_readq(stcb->sctp_ep, stcb,
2053			    control,
2054			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2055		} else {
2056			/*
2057			 * Special check for when streams are resetting. We
2058			 * could be smarter about this and check the
2059			 * actual stream to see if it is not being reset..
2060			 * that way we would not create HOLB amongst
2061			 * streams being reset and those not being reset.
2062			 *
2063			 * We take complete messages that have a stream reset
2064			 * intervening (aka the TSN is after where our
2065			 * cum-ack needs to be) off and put them on a
2066			 * pending_reply_queue. The reassembly ones we do
2067			 * not have to worry about since they are all sorted
2068			 * and processed in TSN order. It is only the
2069			 * singletons I must worry about.
2070			 */
2071			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2072			    SCTP_TSN_GT(tsn, liste->tsn)) {
2073				/*
2074				 * yep, it's past where we need to reset... go
2075				 * ahead and queue it.
2076				 */
2077				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2078					/* first one on */
2079					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2080				} else {
2081					struct sctp_queued_to_read *ctlOn,
2082					                   *nctlOn;
2083					unsigned char inserted = 0;
2084
2085					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2086						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2087							continue;
2088						} else {
2089							/* found it */
2090							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2091							inserted = 1;
2092							break;
2093						}
2094					}
2095					if (inserted == 0) {
2096						/*
2097					 * must be put at the end of
2098					 * the queue; nothing on it
2099					 * has a larger TSN.
2100						 */
2101						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2102					}
2103				}
2104			} else {
2105				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2106				if (*abort_flag) {
2107					return (0);
2108				}
2109			}
2110		}
2111	} else {
2112		/* Into the re-assembly queue */
2113		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2114		if (*abort_flag) {
2115			/*
2116			 * the assoc is now gone and chk was put onto the
2117			 * reasm queue, which has all been freed.
2118			 */
2119			*m = NULL;
2120			return (0);
2121		}
2122	}
2123finish_express_del:
2124	if (tsn == (asoc->cumulative_tsn + 1)) {
2125		/* Update cum-ack */
2126		asoc->cumulative_tsn = tsn;
2127	}
2128	if (last_chunk) {
2129		*m = NULL;
2130	}
2131	if (ordered) {
2132		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2133	} else {
2134		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2135	}
2136	SCTP_STAT_INCR(sctps_recvdata);
2137	/* Set it present please */
2138	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2139		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2140	}
2141	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2142		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2143		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2144	}
2145	/* check the special flag for stream resets */
2146	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2147	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2148		/*
2149		 * we have finished working through the backlogged TSNs; now it
2150		 * is time to reset streams. 1: call the reset function. 2: free
2151		 * the pending_reply space. 3: distribute any chunks in the
2152		 * pending_reply_queue.
2153		 */
2154		struct sctp_queued_to_read *ctl, *nctl;
2155
2156		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2157		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2158		SCTP_FREE(liste, SCTP_M_STRESET);
2159		/* sa_ignore FREED_MEMORY */
2160		liste = TAILQ_FIRST(&asoc->resetHead);
2161		if (TAILQ_EMPTY(&asoc->resetHead)) {
2162			/* All can be removed */
2163			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2164				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2165				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2166				if (*abort_flag) {
2167					return (0);
2168				}
2169			}
2170		} else {
2171			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2172				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2173					break;
2174				}
2175				/*
2176				 * if ctl->sinfo_tsn is <= liste->tsn we can
2177				 * process it, which is the negation of
2178				 * ctl->sinfo_tsn > liste->tsn above
2179				 */
2180				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2181				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2182				if (*abort_flag) {
2183					return (0);
2184				}
2185			}
2186		}
2187		/*
2188		 * Now service re-assembly to pick up anything that has been
2189		 * held on the reassembly queue.
2190		 */
2191		sctp_deliver_reasm_check(stcb, asoc);
2192		need_reasm_check = 0;
2193	}
2194	if (need_reasm_check) {
2195		/* Another one waits? */
2196		sctp_deliver_reasm_check(stcb, asoc);
2197	}
2198	return (1);
2199}
2200
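/*
 * Added note: sctp_map_lookup_tab[val] is the number of consecutive 1-bits
 * in val counting from bit 0, i.e. how many in-sequence TSNs a mapping
 * array byte contributes before its first hole.  For example tab[0x07] == 3
 * and tab[0xff] == 8, which is why the slide loop below adds 8 for a 0xff
 * byte and tab[val] for the first partial byte.
 */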
2201int8_t sctp_map_lookup_tab[256] = {
2202	0, 1, 0, 2, 0, 1, 0, 3,
2203	0, 1, 0, 2, 0, 1, 0, 4,
2204	0, 1, 0, 2, 0, 1, 0, 3,
2205	0, 1, 0, 2, 0, 1, 0, 5,
2206	0, 1, 0, 2, 0, 1, 0, 3,
2207	0, 1, 0, 2, 0, 1, 0, 4,
2208	0, 1, 0, 2, 0, 1, 0, 3,
2209	0, 1, 0, 2, 0, 1, 0, 6,
2210	0, 1, 0, 2, 0, 1, 0, 3,
2211	0, 1, 0, 2, 0, 1, 0, 4,
2212	0, 1, 0, 2, 0, 1, 0, 3,
2213	0, 1, 0, 2, 0, 1, 0, 5,
2214	0, 1, 0, 2, 0, 1, 0, 3,
2215	0, 1, 0, 2, 0, 1, 0, 4,
2216	0, 1, 0, 2, 0, 1, 0, 3,
2217	0, 1, 0, 2, 0, 1, 0, 7,
2218	0, 1, 0, 2, 0, 1, 0, 3,
2219	0, 1, 0, 2, 0, 1, 0, 4,
2220	0, 1, 0, 2, 0, 1, 0, 3,
2221	0, 1, 0, 2, 0, 1, 0, 5,
2222	0, 1, 0, 2, 0, 1, 0, 3,
2223	0, 1, 0, 2, 0, 1, 0, 4,
2224	0, 1, 0, 2, 0, 1, 0, 3,
2225	0, 1, 0, 2, 0, 1, 0, 6,
2226	0, 1, 0, 2, 0, 1, 0, 3,
2227	0, 1, 0, 2, 0, 1, 0, 4,
2228	0, 1, 0, 2, 0, 1, 0, 3,
2229	0, 1, 0, 2, 0, 1, 0, 5,
2230	0, 1, 0, 2, 0, 1, 0, 3,
2231	0, 1, 0, 2, 0, 1, 0, 4,
2232	0, 1, 0, 2, 0, 1, 0, 3,
2233	0, 1, 0, 2, 0, 1, 0, 8
2234};
2235
2236
2237void
2238sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2239{
2240	/*
2241	 * Now we also need to check the mapping array in a couple of ways.
2242	 * 1) Did we move the cum-ack point?
2243	 *
2244	 * When you first glance at this you might think that all entries that
2245	 * make up the position of the cum-ack would be in the nr-mapping
2246	 * array only.. i.e. things up to the cum-ack are always
2247	 * deliverable. That's true with one exception: when it's a fragmented
2248	 * message we may not deliver the data until some threshold (or all
2249	 * of it) is in place. So we must OR the nr_mapping_array and
2250	 * mapping_array to get a true picture of the cum-ack.
2251	 */
2252	struct sctp_association *asoc;
2253	int at;
2254	uint8_t val;
2255	int slide_from, slide_end, lgap, distance;
2256	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2257
2258	asoc = &stcb->asoc;
2259
2260	old_cumack = asoc->cumulative_tsn;
2261	old_base = asoc->mapping_array_base_tsn;
2262	old_highest = asoc->highest_tsn_inside_map;
2263	/*
2264	 * We could probably improve this a small bit by calculating the
2265	 * offset of the current cum-ack as the starting point.
2266	 */
2267	at = 0;
2268	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2269		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2270		if (val == 0xff) {
2271			at += 8;
2272		} else {
2273			/* there is a 0 bit */
2274			at += sctp_map_lookup_tab[val];
2275			break;
2276		}
2277	}
2278	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
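	/*
	 * Worked example (illustration only): with mapping_array_base_tsn
	 * = 100 and OR'd bytes { 0xff, 0x07, ... }, the loop above counts
	 * at = 8 + 3 = 11, so cumulative_tsn = 100 + (11 - 1) = 110 and
	 * TSN 111 is the first hole.
	 */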
2279
2280	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2281	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2282#ifdef INVARIANTS
2283		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2284		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2285#else
2286		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2287		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2288		sctp_print_mapping_array(asoc);
2289		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2290			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2291		}
2292		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2293		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2294#endif
2295	}
2296	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2297		highest_tsn = asoc->highest_tsn_inside_nr_map;
2298	} else {
2299		highest_tsn = asoc->highest_tsn_inside_map;
2300	}
2301	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2302		/* The whole mapping array is contiguous up to the highest TSN */
2303		/* highest becomes the cum-ack */
2304		int clr;
2305
2306#ifdef INVARIANTS
2307		unsigned int i;
2308
2309#endif
2310
2311		/* clear the array */
2312		clr = ((at + 7) >> 3);
2313		if (clr > asoc->mapping_array_size) {
2314			clr = asoc->mapping_array_size;
2315		}
2316		memset(asoc->mapping_array, 0, clr);
2317		memset(asoc->nr_mapping_array, 0, clr);
2318#ifdef INVARIANTS
2319		for (i = 0; i < asoc->mapping_array_size; i++) {
2320			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2321				printf("Error Mapping array's not clean at clear\n");
2322				sctp_print_mapping_array(asoc);
2323			}
2324		}
2325#endif
2326		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2327		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2328	} else if (at >= 8) {
2329		/* we can slide the mapping array down */
2330		/* slide_from holds where we hit the first NON 0xff byte */
2331
2332		/*
2333		 * now calculate the ceiling of the move using our highest
2334		 * TSN value
2335		 */
2336		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2337		slide_end = (lgap >> 3);
2338		if (slide_end < slide_from) {
2339			sctp_print_mapping_array(asoc);
2340#ifdef INVARIANTS
2341			panic("impossible slide");
2342#else
2343			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2344			    lgap, slide_end, slide_from, at);
2345			return;
2346#endif
2347		}
2348		if (slide_end > asoc->mapping_array_size) {
2349#ifdef INVARIANTS
2350			panic("would overrun buffer");
2351#else
2352			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2353			    asoc->mapping_array_size, slide_end);
2354			slide_end = asoc->mapping_array_size;
2355#endif
2356		}
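		/*
		 * Example (illustration only): if the first non-0xff byte
		 * is slide_from = 2 and highest_tsn maps to lgap = 25, then
		 * slide_end = 3 and distance = 2: bytes 2..3 are copied
		 * down to 0..1 below and mapping_array_base_tsn advances by
		 * slide_from * 8 = 16 TSNs.
		 */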
2357		distance = (slide_end - slide_from) + 1;
2358		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2359			sctp_log_map(old_base, old_cumack, old_highest,
2360			    SCTP_MAP_PREPARE_SLIDE);
2361			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2362			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2363		}
2364		if (distance + slide_from > asoc->mapping_array_size ||
2365		    distance < 0) {
2366			/*
2367			 * Here we do NOT slide forward the array so that
2368			 * hopefully when more data comes in to fill it up
2369			 * we will be able to slide it forward. Really I
2370			 * don't think this should happen :-0
2371			 */
2372
2373			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2374				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2375				    (uint32_t) asoc->mapping_array_size,
2376				    SCTP_MAP_SLIDE_NONE);
2377			}
2378		} else {
2379			int ii;
2380
2381			for (ii = 0; ii < distance; ii++) {
2382				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2383				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2384
2385			}
2386			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2387				asoc->mapping_array[ii] = 0;
2388				asoc->nr_mapping_array[ii] = 0;
2389			}
2390			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2391				asoc->highest_tsn_inside_map += (slide_from << 3);
2392			}
2393			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2394				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2395			}
2396			asoc->mapping_array_base_tsn += (slide_from << 3);
2397			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2398				sctp_log_map(asoc->mapping_array_base_tsn,
2399				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2400				    SCTP_MAP_SLIDE_RESULT);
2401			}
2402		}
2403	}
2404}
2405
2406void
2407sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2408{
2409	struct sctp_association *asoc;
2410	uint32_t highest_tsn;
2411
2412	asoc = &stcb->asoc;
2413	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2414		highest_tsn = asoc->highest_tsn_inside_nr_map;
2415	} else {
2416		highest_tsn = asoc->highest_tsn_inside_map;
2417	}
2418
2419	/*
2420	 * Now we need to see if we need to queue a sack or just start the
2421	 * timer (if allowed).
2422	 */
2423	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2424		/*
2425		 * Ok, special case: in the SHUTDOWN-SENT state we make
2426		 * sure the SACK timer is off and instead send a SHUTDOWN
2427		 * and a SACK.
2428		 */
2429		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2430			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2431			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2432		}
2433		sctp_send_shutdown(stcb,
2434		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2435		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2436	} else {
2437		int is_a_gap;
2438
2439		/* is there a gap now? */
2440		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2441
2442		/*
2443		 * CMT DAC algorithm: increase number of packets received
2444		 * since last ack
2445		 */
2446		stcb->asoc.cmt_dac_pkts_rcvd++;
2447
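		/*
		 * Added summary of the test below: send a SACK right away
		 * if an immediate SACK was requested, a gap just closed,
		 * duplicate TSNs are pending, a gap still exists, delayed
		 * SACK is disabled, or sack_freq packets have been seen;
		 * otherwise (re)arm the delayed-ack timer.
		 */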
2448		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2449							 * SACK */
2450		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2451							 * longer is one */
2452		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2453		    (is_a_gap) ||	/* is still a gap */
2454		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2455		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2456		    ) {
2457
2458			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2459			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2460			    (stcb->asoc.send_sack == 0) &&
2461			    (stcb->asoc.numduptsns == 0) &&
2462			    (stcb->asoc.delayed_ack) &&
2463			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2464
2465				/*
2466				 * CMT DAC algorithm: With CMT, delay acks
2467				 * even in the face of reordering.
2468				 *
2469				 * Therefore, acks that do not have to be
2470				 * sent because of the above reasons will be
2471				 * delayed. That is, acks that would have
2472				 * been sent due to gap reports will be
2473				 * delayed with DAC. Start the delayed ack
2474				 * timer.
2475				 */
2476				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2477				    stcb->sctp_ep, stcb, NULL);
2478			} else {
2479				/*
2480				 * Ok, we must build a SACK since the timer
2481				 * is pending, we got our first packet, OR
2482				 * there are gaps or duplicates.
2483				 */
2484				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2485				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2486			}
2487		} else {
2488			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2489				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2490				    stcb->sctp_ep, stcb, NULL);
2491			}
2492		}
2493	}
2494}
2495
2496void
2497sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2498{
2499	struct sctp_tmit_chunk *chk;
2500	uint32_t tsize, pd_point;
2501	uint16_t nxt_todel;
2502
2503	if (asoc->fragmented_delivery_inprogress) {
2504		sctp_service_reassembly(stcb, asoc);
2505	}
2506	/* Can we proceed further, i.e. is the PD-API complete? */
2507	if (asoc->fragmented_delivery_inprogress) {
2508		/* no */
2509		return;
2510	}
2511	/*
2512	 * Now is there some other chunk I can deliver from the reassembly
2513	 * queue?
2514	 */
2515doit_again:
2516	chk = TAILQ_FIRST(&asoc->reasmqueue);
2517	if (chk == NULL) {
2518		asoc->size_on_reasm_queue = 0;
2519		asoc->cnt_on_reasm_queue = 0;
2520		return;
2521	}
2522	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2523	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2524	    ((nxt_todel == chk->rec.data.stream_seq) ||
2525	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2526		/*
2527		 * Yep, the first one is here. We set up to start reception,
2528		 * by backing down the TSN just in case we can't deliver.
2529		 */
2530
2531		/*
2532		 * Before we start though, either all of the message should
2533		 * be here, or at least pd_point bytes of it, so that
2534		 * something can be delivered.
2535		 */
2536		if (stcb->sctp_socket) {
2537			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2538			    stcb->sctp_ep->partial_delivery_point);
2539		} else {
2540			pd_point = stcb->sctp_ep->partial_delivery_point;
2541		}
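		/*
		 * Example (illustration only): with a 64k receive buffer
		 * and partial_delivery_point = 4096, pd_point = 4096, so
		 * the PD-API below kicks in once the whole message, or at
		 * least 4096 bytes of it, sit on the reassembly queue.
		 */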
2542		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2543			asoc->fragmented_delivery_inprogress = 1;
2544			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2545			asoc->str_of_pdapi = chk->rec.data.stream_number;
2546			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2547			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2548			asoc->fragment_flags = chk->rec.data.rcv_flags;
2549			sctp_service_reassembly(stcb, asoc);
2550			if (asoc->fragmented_delivery_inprogress == 0) {
2551				goto doit_again;
2552			}
2553		}
2554	}
2555}
2556
2557int
2558sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2559    struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2560    struct sctp_nets *net, uint32_t * high_tsn)
2561{
2562	struct sctp_data_chunk *ch, chunk_buf;
2563	struct sctp_association *asoc;
2564	int num_chunks = 0;	/* number of data chunks processed */
2565	int stop_proc = 0;
2566	int chk_length, break_flag, last_chunk;
2567	int abort_flag = 0, was_a_gap;
2568	struct mbuf *m;
2569	uint32_t highest_tsn;
2570
2571	/* set the rwnd */
2572	sctp_set_rwnd(stcb, &stcb->asoc);
2573
2574	m = *mm;
2575	SCTP_TCB_LOCK_ASSERT(stcb);
2576	asoc = &stcb->asoc;
2577	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2578		highest_tsn = asoc->highest_tsn_inside_nr_map;
2579	} else {
2580		highest_tsn = asoc->highest_tsn_inside_map;
2581	}
2582	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2583	/*
2584	 * setup where we got the last DATA packet from for any SACK that
2585	 * may need to go out. Don't bump the net. This is done ONLY when a
2586	 * chunk is assigned.
2587	 */
2588	asoc->last_data_chunk_from = net;
2589
2590	/*-
2591	 * Now before we proceed we must figure out if this is a wasted
2592	 * cluster... i.e. it is a small packet sent in and yet the driver
2593	 * underneath allocated a full cluster for it. If so we must copy it
2594	 * to a smaller mbuf and free up the cluster mbuf. This will help
2595	 * with cluster starvation. Note for __Panda__ we don't do this
2596	 * since it has clusters all the way down to 64 bytes.
2597	 */
2598	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2599		/* we only handle mbufs that are singletons.. not chains */
2600		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2601		if (m) {
2602			/* ok lets see if we can copy the data up */
2603			/* ok let's see if we can copy the data up */
2604
2605			/* get the pointers and copy */
2606			to = mtod(m, caddr_t *);
2607			from = mtod((*mm), caddr_t *);
2608			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2609			/* copy the length and free up the old */
2610			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2611			sctp_m_freem(*mm);
2612		/* success, pass the copy back */
2613			*mm = m;
2614		} else {
2615			/* We are in trouble in the mbuf world .. yikes */
2616			m = *mm;
2617		}
2618	}
2619	/* get pointer to the first chunk header */
2620	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2621	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2622	if (ch == NULL) {
2623		return (1);
2624	}
2625	/*
2626	 * process all DATA chunks...
2627	 */
2628	*high_tsn = asoc->cumulative_tsn;
2629	break_flag = 0;
2630	asoc->data_pkts_seen++;
2631	while (stop_proc == 0) {
2632		/* validate chunk length */
2633		chk_length = ntohs(ch->ch.chunk_length);
2634		if (length - *offset < chk_length) {
2635			/* all done, mutilated chunk */
2636			stop_proc = 1;
2637			break;
2638		}
2639		if (ch->ch.chunk_type == SCTP_DATA) {
2640			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2641				/*
2642				 * Need to send an abort since we had an
2643				 * invalid data chunk.
2644				 */
2645				struct mbuf *op_err;
2646
2647				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2648				    0, M_DONTWAIT, 1, MT_DATA);
2649
2650				if (op_err) {
2651					struct sctp_paramhdr *ph;
2652					uint32_t *ippp;
2653
2654					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2655					    (2 * sizeof(uint32_t));
2656					ph = mtod(op_err, struct sctp_paramhdr *);
2657					ph->param_type =
2658					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2659					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2660					ippp = (uint32_t *) (ph + 1);
2661					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2662					ippp++;
2663					*ippp = asoc->cumulative_tsn;
2664
2665				}
2666				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2667				sctp_abort_association(inp, stcb, m, iphlen, sh,
2668				    op_err, 0, net->port);
2669				return (2);
2670			}
2671#ifdef SCTP_AUDITING_ENABLED
2672			sctp_audit_log(0xB1, 0);
2673#endif
2674			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2675				last_chunk = 1;
2676			} else {
2677				last_chunk = 0;
2678			}
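			/*
			 * Added note: SCTP_SIZE32() rounds chk_length up to
			 * a 4-byte boundary (e.g. a 37-byte chunk occupies
			 * 40 bytes on the wire), so the test above accounts
			 * for chunk padding.
			 */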
2679			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2680			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2681			    last_chunk)) {
2682				num_chunks++;
2683			}
2684			if (abort_flag)
2685				return (2);
2686
2687			if (break_flag) {
2688				/*
2689				 * Set because we are out of rwnd space and
2690				 * have no drop report space left.
2691				 */
2692				stop_proc = 1;
2693				break;
2694			}
2695		} else {
2696			/* not a data chunk in the data region */
2697			switch (ch->ch.chunk_type) {
2698			case SCTP_INITIATION:
2699			case SCTP_INITIATION_ACK:
2700			case SCTP_SELECTIVE_ACK:
2701			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2702			case SCTP_HEARTBEAT_REQUEST:
2703			case SCTP_HEARTBEAT_ACK:
2704			case SCTP_ABORT_ASSOCIATION:
2705			case SCTP_SHUTDOWN:
2706			case SCTP_SHUTDOWN_ACK:
2707			case SCTP_OPERATION_ERROR:
2708			case SCTP_COOKIE_ECHO:
2709			case SCTP_COOKIE_ACK:
2710			case SCTP_ECN_ECHO:
2711			case SCTP_ECN_CWR:
2712			case SCTP_SHUTDOWN_COMPLETE:
2713			case SCTP_AUTHENTICATION:
2714			case SCTP_ASCONF_ACK:
2715			case SCTP_PACKET_DROPPED:
2716			case SCTP_STREAM_RESET:
2717			case SCTP_FORWARD_CUM_TSN:
2718			case SCTP_ASCONF:
2719				/*
2720				 * Now, what do we do with KNOWN chunks that
2721				 * are NOT in the right place?
2722				 *
2723				 * For now, I do nothing but ignore them. We
2724				 * may later want to add sysctl stuff to
2725				 * switch out and do either an ABORT() or
2726				 * possibly process them.
2727				 */
2728				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2729					struct mbuf *op_err;
2730
2731					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2732					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2733					return (2);
2734				}
2735				break;
2736			default:
2737				/* unknown chunk type, use bit rules */
2738				if (ch->ch.chunk_type & 0x40) {
2739					/* Add an error report to the queue */
2740					struct mbuf *merr;
2741					struct sctp_paramhdr *phd;
2742
2743					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2744					if (merr) {
2745						phd = mtod(merr, struct sctp_paramhdr *);
2746						/*
2747						 * We cheat and use param
2748						 * type since we did not
2749						 * bother to define an error
2750						 * cause struct. They are
2751						 * the same basic format
2752						 * with different names.
2753						 */
2754						phd->param_type =
2755						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2756						phd->param_length =
2757						    htons(chk_length + sizeof(*phd));
2758						SCTP_BUF_LEN(merr) = sizeof(*phd);
2759						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2760						    SCTP_SIZE32(chk_length),
2761						    M_DONTWAIT);
2762						if (SCTP_BUF_NEXT(merr)) {
2763							sctp_queue_op_err(stcb, merr);
2764						} else {
2765							sctp_m_freem(merr);
2766						}
2767					}
2768				}
2769				if ((ch->ch.chunk_type & 0x80) == 0) {
2770					/* discard the rest of this packet */
2771					stop_proc = 1;
2772				}	/* else skip this bad chunk and
2773					 * continue... */
2774				break;
2775			}	/* switch on chunk type */
2776		}
2777		*offset += SCTP_SIZE32(chk_length);
2778		if ((*offset >= length) || stop_proc) {
2779			/* no more data left in the mbuf chain */
2780			stop_proc = 1;
2781			continue;
2782		}
2783		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2784		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2785		if (ch == NULL) {
2786			*offset = length;
2787			stop_proc = 1;
2788			break;
2789
2790		}
2791	}			/* while */
2792	if (break_flag) {
2793		/*
2794		 * we need to report rwnd overrun drops.
2795		 */
2796		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2797	}
2798	if (num_chunks) {
2799		/*
2800		 * Did we get data? If so, update the time for auto-close and
2801		 * give peer credit for being alive.
2802		 */
2803		SCTP_STAT_INCR(sctps_recvpktwithdata);
2804		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2805			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2806			    stcb->asoc.overall_error_count,
2807			    0,
2808			    SCTP_FROM_SCTP_INDATA,
2809			    __LINE__);
2810		}
2811		stcb->asoc.overall_error_count = 0;
2812		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2813	}
2814	/* now service all of the reassm queue if needed */
2815	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2816		sctp_service_queues(stcb, asoc);
2817
2818	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2819		/* Assure that we ack right away */
2820		stcb->asoc.send_sack = 1;
2821	}
2822	/* Start a sack timer or QUEUE a SACK for sending */
2823	sctp_sack_check(stcb, was_a_gap);
2824	return (0);
2825}
2826
2827static int
2828sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2829    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2830    int *num_frs,
2831    uint32_t * biggest_newly_acked_tsn,
2832    uint32_t * this_sack_lowest_newack,
2833    int *rto_ok)
2834{
2835	struct sctp_tmit_chunk *tp1;
2836	unsigned int theTSN;
2837	int j, wake_him = 0, circled = 0;
2838
2839	/* Recover the tp1 we last saw */
2840	tp1 = *p_tp1;
2841	if (tp1 == NULL) {
2842		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2843	}
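	/*
	 * Gap ack blocks are offsets from the SACK's cumulative TSN
	 * (illustration only): with last_tsn = 1000, frag_strt = 2 and
	 * frag_end = 4, the loop below walks theTSN = 1002..1004.
	 */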
2844	for (j = frag_strt; j <= frag_end; j++) {
2845		theTSN = j + last_tsn;
2846		while (tp1) {
2847			if (tp1->rec.data.doing_fast_retransmit)
2848				(*num_frs) += 1;
2849
2850			/*-
2851			 * CMT: CUCv2 algorithm. For each TSN being
2852			 * processed from the sent queue, track the
2853			 * next expected pseudo-cumack, or
2854			 * rtx_pseudo_cumack, if required. Separate
2855			 * cumack trackers for first transmissions,
2856			 * and retransmissions.
2857			 */
2858			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2859			    (tp1->snd_count == 1)) {
2860				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2861				tp1->whoTo->find_pseudo_cumack = 0;
2862			}
2863			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2864			    (tp1->snd_count > 1)) {
2865				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2866				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2867			}
2868			if (tp1->rec.data.TSN_seq == theTSN) {
2869				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2870					/*-
2871					 * must be held until
2872					 * cum-ack passes
2873					 */
2874					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2875						/*-
2876						 * If it is less than RESEND, it is
2877						 * now no-longer in flight.
2878						 * Higher values may already be set
2879						 * via previous Gap Ack Blocks...
2880						 * i.e. ACKED or RESEND.
2881						 */
2882						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2883						    *biggest_newly_acked_tsn)) {
2884							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2885						}
2886						/*-
2887						 * CMT: SFR algo (and HTNA) - set
2888						 * saw_newack to 1 for dest being
2889						 * newly acked. update
2890						 * this_sack_highest_newack if
2891						 * appropriate.
2892						 */
2893						if (tp1->rec.data.chunk_was_revoked == 0)
2894							tp1->whoTo->saw_newack = 1;
2895
2896						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2897						    tp1->whoTo->this_sack_highest_newack)) {
2898							tp1->whoTo->this_sack_highest_newack =
2899							    tp1->rec.data.TSN_seq;
2900						}
2901						/*-
2902						 * CMT DAC algo: also update
2903						 * this_sack_lowest_newack
2904						 */
2905						if (*this_sack_lowest_newack == 0) {
2906							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2907								sctp_log_sack(*this_sack_lowest_newack,
2908								    last_tsn,
2909								    tp1->rec.data.TSN_seq,
2910								    0,
2911								    0,
2912								    SCTP_LOG_TSN_ACKED);
2913							}
2914							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2915						}
2916						/*-
2917						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2918						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2919						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2920						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2921						 * Separate pseudo_cumack trackers for first transmissions and
2922						 * retransmissions.
2923						 */
2924						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2925							if (tp1->rec.data.chunk_was_revoked == 0) {
2926								tp1->whoTo->new_pseudo_cumack = 1;
2927							}
2928							tp1->whoTo->find_pseudo_cumack = 1;
2929						}
2930						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2931							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2932						}
2933						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2934							if (tp1->rec.data.chunk_was_revoked == 0) {
2935								tp1->whoTo->new_pseudo_cumack = 1;
2936							}
2937							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2938						}
2939						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2940							sctp_log_sack(*biggest_newly_acked_tsn,
2941							    last_tsn,
2942							    tp1->rec.data.TSN_seq,
2943							    frag_strt,
2944							    frag_end,
2945							    SCTP_LOG_TSN_ACKED);
2946						}
2947						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2948							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2949							    tp1->whoTo->flight_size,
2950							    tp1->book_size,
2951							    (uintptr_t) tp1->whoTo,
2952							    tp1->rec.data.TSN_seq);
2953						}
2954						sctp_flight_size_decrease(tp1);
2955						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2956							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2957							    tp1);
2958						}
2959						sctp_total_flight_decrease(stcb, tp1);
2960
2961						tp1->whoTo->net_ack += tp1->send_size;
2962						if (tp1->snd_count < 2) {
2963							/*-
2964							 * True non-retransmitted chunk
2965							 */
2966							tp1->whoTo->net_ack2 += tp1->send_size;
2967
2968							/*-
2969							 * update RTO too ?
2970							 */
2971							if (tp1->do_rtt) {
2972								if (*rto_ok) {
2973									tp1->whoTo->RTO =
2974									    sctp_calculate_rto(stcb,
2975									    &stcb->asoc,
2976									    tp1->whoTo,
2977									    &tp1->sent_rcv_time,
2978									    sctp_align_safe_nocopy,
2979									    SCTP_RTT_FROM_DATA);
2980									*rto_ok = 0;
2981								}
2982								if (tp1->whoTo->rto_needed == 0) {
2983									tp1->whoTo->rto_needed = 1;
2984								}
2985								tp1->do_rtt = 0;
2986							}
2987						}
2988					}
2989					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2990						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2991						    stcb->asoc.this_sack_highest_gap)) {
2992							stcb->asoc.this_sack_highest_gap =
2993							    tp1->rec.data.TSN_seq;
2994						}
2995						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2996							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2997#ifdef SCTP_AUDITING_ENABLED
2998							sctp_audit_log(0xB2,
2999							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3000#endif
3001						}
3002					}
3003					/*-
3004					 * All chunks NOT UNSENT fall through here and are marked
3005					 * (leave PR-SCTP ones that are to skip alone though)
3006					 */
3007					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3008						tp1->sent = SCTP_DATAGRAM_MARKED;
3009
3010					if (tp1->rec.data.chunk_was_revoked) {
3011						/* deflate the cwnd */
3012						tp1->whoTo->cwnd -= tp1->book_size;
3013						tp1->rec.data.chunk_was_revoked = 0;
3014					}
3015					/* NR Sack code here */
3016					if (nr_sacking) {
3017						if (tp1->data) {
3018							/*
3019							 * sa_ignore
3020							 * NO_NULL_CHK
3021							 */
3022							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3023							sctp_m_freem(tp1->data);
3024							tp1->data = NULL;
3025						}
3026						wake_him++;
3027					}
3028				}
3029				break;
3030			}	/* if (tp1->TSN_seq == theTSN) */
3031			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3032				break;
3033			}
3034			tp1 = TAILQ_NEXT(tp1, sctp_next);
3035			if ((tp1 == NULL) && (circled == 0)) {
3036				circled++;
3037				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3038			}
3039		}		/* end while (tp1) */
3040		if (tp1 == NULL) {
3041			circled = 0;
3042			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3043		}
3044		/* In case the fragments were not in order we must reset */
3045	}			/* end for (j = fragStart */
3046	*p_tp1 = tp1;
3047	return (wake_him);	/* Return value only used for nr-sack */
3048}
3049
3050
3051static int
3052sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3053    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3054    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3055    int num_seg, int num_nr_seg, int *rto_ok)
3056{
3057	struct sctp_gap_ack_block *frag, block;
3058	struct sctp_tmit_chunk *tp1;
3059	int i;
3060	int num_frs = 0;
3061	int chunk_freed;
3062	int non_revocable;
3063	uint16_t frag_strt, frag_end, prev_frag_end;
3064
3065	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3066	prev_frag_end = 0;
3067	chunk_freed = 0;
3068
3069	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3070		if (i == num_seg) {
3071			prev_frag_end = 0;
3072			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3073		}
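		/*
		 * Added note: the first num_seg blocks are revocable gap
		 * reports and the remaining num_nr_seg blocks are
		 * non-renegable (NR) reports; the walk restarts at the head
		 * of the sent queue when the NR section begins since each
		 * section is sorted independently.
		 */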
3074		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3075		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3076		*offset += sizeof(block);
3077		if (frag == NULL) {
3078			return (chunk_freed);
3079		}
3080		frag_strt = ntohs(frag->start);
3081		frag_end = ntohs(frag->end);
3082
3083		if (frag_strt > frag_end) {
3084			/* This gap report is malformed, skip it. */
3085			continue;
3086		}
3087		if (frag_strt <= prev_frag_end) {
3088			/* This gap report is not in order, so restart. */
3089			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3090		}
3091		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3092			*biggest_tsn_acked = last_tsn + frag_end;
3093		}
3094		if (i < num_seg) {
3095			non_revocable = 0;
3096		} else {
3097			non_revocable = 1;
3098		}
3099		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3100		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3101		    this_sack_lowest_newack, rto_ok)) {
3102			chunk_freed = 1;
3103		}
3104		prev_frag_end = frag_end;
3105	}
3106	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3107		if (num_frs)
3108			sctp_log_fr(*biggest_tsn_acked,
3109			    *biggest_newly_acked_tsn,
3110			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3111	}
3112	return (chunk_freed);
3113}
3114
3115static void
3116sctp_check_for_revoked(struct sctp_tcb *stcb,
3117    struct sctp_association *asoc, uint32_t cumack,
3118    uint32_t biggest_tsn_acked)
3119{
3120	struct sctp_tmit_chunk *tp1;
3121	int tot_revoked = 0;
3122
3123	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3124		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3125			/*
3126			 * ok this guy is either ACKED or MARKED. If it is
3127			 * ACKED it has been previously acked but not this
3128			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3129			 * again.
3130			 */
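			/*
			 * Example (illustration only): TSN t was covered by
			 * a gap block in the previous SACK (ACKED); this
			 * SACK's blocks no longer cover t even though cumack
			 * < t, so t is revoked below: marked SENT again and
			 * its flight size and cwnd are re-inflated.
			 */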
3131			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3132				break;
3133			}
3134			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3135				/* it has been revoked */
3136				tp1->sent = SCTP_DATAGRAM_SENT;
3137				tp1->rec.data.chunk_was_revoked = 1;
3138				/*
3139				 * We must add this stuff back in to assure
3140				 * timers and such get started.
3141				 */
3142				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3143					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3144					    tp1->whoTo->flight_size,
3145					    tp1->book_size,
3146					    (uintptr_t) tp1->whoTo,
3147					    tp1->rec.data.TSN_seq);
3148				}
3149				sctp_flight_size_increase(tp1);
3150				sctp_total_flight_increase(stcb, tp1);
3151				/*
3152				 * We inflate the cwnd to compensate for our
3153				 * artificial inflation of the flight_size.
3154				 */
3155				tp1->whoTo->cwnd += tp1->book_size;
3156				tot_revoked++;
3157				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3158					sctp_log_sack(asoc->last_acked_seq,
3159					    cumack,
3160					    tp1->rec.data.TSN_seq,
3161					    0,
3162					    0,
3163					    SCTP_LOG_TSN_REVOKED);
3164				}
3165			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3166				/* it has been re-acked in this SACK */
3167				tp1->sent = SCTP_DATAGRAM_ACKED;
3168			}
3169		}
3170		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3171			break;
3172	}
3173}
3174
3175
3176static void
3177sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3178    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3179{
3180	struct sctp_tmit_chunk *tp1;
3181	int strike_flag = 0;
3182	struct timeval now;
3183	int tot_retrans = 0;
3184	uint32_t sending_seq;
3185	struct sctp_nets *net;
3186	int num_dests_sacked = 0;
3187
3188	/*
3189	 * select the sending_seq, this is either the next thing ready to be
3190	 * sent but not transmitted, OR, the next seq we assign.
3191	 */
3192	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3193	if (tp1 == NULL) {
3194		sending_seq = asoc->sending_seq;
3195	} else {
3196		sending_seq = tp1->rec.data.TSN_seq;
3197	}
3198
3199	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3200	if ((asoc->sctp_cmt_on_off > 0) &&
3201	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3202		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3203			if (net->saw_newack)
3204				num_dests_sacked++;
3205		}
3206	}
3207	if (stcb->asoc.peer_supports_prsctp) {
3208		(void)SCTP_GETTIME_TIMEVAL(&now);
3209	}
3210	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3211		strike_flag = 0;
3212		if (tp1->no_fr_allowed) {
3213			/* this one had a timeout or something */
3214			continue;
3215		}
3216		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3217			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3218				sctp_log_fr(biggest_tsn_newly_acked,
3219				    tp1->rec.data.TSN_seq,
3220				    tp1->sent,
3221				    SCTP_FR_LOG_CHECK_STRIKE);
3222		}
3223		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3224		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3225			/* done */
3226			break;
3227		}
3228		if (stcb->asoc.peer_supports_prsctp) {
3229			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3230				/* Is it expired? */
3231				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3232					/* Yes so drop it */
3233					if (tp1->data != NULL) {
3234						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3235						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3236						    SCTP_SO_NOT_LOCKED);
3237					}
3238					continue;
3239				}
3240			}
3241		}
3242		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3243			/* we are beyond the tsn in the sack  */
3244			break;
3245		}
3246		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3247			/* either a RESEND, ACKED, or MARKED */
3248			/* skip */
3249			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3250				/* Continue striking FWD-TSN chunks */
3251				tp1->rec.data.fwd_tsn_cnt++;
3252			}
3253			continue;
3254		}
3255		/*
3256		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3257		 */
3258		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3259			/*
3260			 * No new acks were received for data sent to this
3261			 * dest. Therefore, according to the SFR algo for
3262			 * CMT, no data sent to this dest can be marked for
3263			 * FR using this SACK.
3264			 */
3265			continue;
3266		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3267		    tp1->whoTo->this_sack_highest_newack)) {
3268			/*
3269			 * CMT: New acks were received for data sent to
3270			 * this dest. But no new acks were seen for data
3271			 * sent after tp1. Therefore, according to the SFR
3272			 * algo for CMT, tp1 cannot be marked for FR using
3273			 * this SACK. This step covers part of the DAC algo
3274			 * and the HTNA algo as well.
3275			 */
3276			continue;
3277		}
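		/*
		 * Worked example (illustrative): with two destinations A and
		 * B, if this SACK newly acks only TSNs sent to A, then
		 * saw_newack is 0 for B and nothing outstanding on B is
		 * struck by this SACK; chunks sent to A above A's
		 * this_sack_highest_newack are likewise spared.
		 */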
3278		/*
3279		 * Here we check to see if we have already done a FR
3280		 * and if so we see if the biggest TSN we saw in the sack is
3281		 * smaller than the recovery point. If so we don't strike
3282		 * the tsn... otherwise we CAN strike the TSN.
3283		 */
3284		/*
3285		 * @@@ JRI: Check for CMT if (accum_moved &&
3286		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3287		 * 0)) {
3288		 */
3289		if (accum_moved && asoc->fast_retran_loss_recovery) {
3290			/*
3291			 * Strike the TSN if in fast-recovery and cum-ack
3292			 * moved.
3293			 */
3294			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3295				sctp_log_fr(biggest_tsn_newly_acked,
3296				    tp1->rec.data.TSN_seq,
3297				    tp1->sent,
3298				    SCTP_FR_LOG_STRIKE_CHUNK);
3299			}
3300			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3301				tp1->sent++;
3302			}
3303			if ((asoc->sctp_cmt_on_off > 0) &&
3304			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3305				/*
3306				 * CMT DAC algorithm: If SACK flag is set to
3307				 * 0, then lowest_newack test will not pass
3308				 * because it would have been set to the
3309				 * cumack earlier. If not already to be
3310				 * rtx'd, if not a mixed sack, and if tp1 is
3311				 * not between two sacked TSNs, then mark by
3312				 * one more. NOTE that we are marking by one
3313				 * additional time since the SACK DAC flag
3314				 * indicates that two packets have been
3315				 * received after this missing TSN.
3316				 */
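				/*
				 * Worked example (illustrative): TSN 5 is
				 * missing, the SACK cum-acks 4, gap-acks 6
				 * and 7, and carries the DAC flag. On a
				 * single-homed assoc (num_dests_sacked ==
				 * 1), TSN 5 is then struck twice by this one
				 * SACK, cutting the number of SACKs needed
				 * to reach SCTP_DATAGRAM_RESEND.
				 */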
3317				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3318				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3319					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3320						sctp_log_fr(16 + num_dests_sacked,
3321						    tp1->rec.data.TSN_seq,
3322						    tp1->sent,
3323						    SCTP_FR_LOG_STRIKE_CHUNK);
3324					}
3325					tp1->sent++;
3326				}
3327			}
3328		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3329		    (asoc->sctp_cmt_on_off == 0)) {
3330			/*
3331			 * For those that have done a FR we must take
3332			 * special consideration if we strike. I.e the
3333			 * special consideration if we strike. I.e., the
3334			 * sending_seq at the time we did the FR.
3335			 */
3336			if (
3337#ifdef SCTP_FR_TO_ALTERNATE
3338			/*
3339			 * If FR's go to new networks, then we must only do
3340			 * this for singly homed asoc's. However if the FR's
3341			 * go to the same network (Armando's work) then it's
3342			 * ok to FR multiple times.
3343			 */
3344			    (asoc->numnets < 2)
3345#else
3346			    (1)
3347#endif
3348			    ) {
3349
3350				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3351				    tp1->rec.data.fast_retran_tsn)) {
3352					/*
3353					 * Strike the TSN, since this ack is
3354					 * beyond where things were when we
3355					 * did a FR.
3356					 */
3357					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3358						sctp_log_fr(biggest_tsn_newly_acked,
3359						    tp1->rec.data.TSN_seq,
3360						    tp1->sent,
3361						    SCTP_FR_LOG_STRIKE_CHUNK);
3362					}
3363					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3364						tp1->sent++;
3365					}
3366					strike_flag = 1;
3367					if ((asoc->sctp_cmt_on_off > 0) &&
3368					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3369						/*
3370						 * CMT DAC algorithm: If
3371						 * SACK flag is set to 0,
3372						 * then lowest_newack test
3373						 * will not pass because it
3374						 * would have been set to
3375						 * the cumack earlier. If
3376						 * not already to be rtx'd,
3377						 * if not a mixed sack, and
3378						 * if tp1 is not between two
3379						 * sacked TSNs, then mark by
3380						 * one more. NOTE that we
3381						 * are marking by one
3382						 * additional time since the
3383						 * SACK DAC flag indicates
3384						 * that two packets have
3385						 * been received after this
3386						 * missing TSN.
3387						 */
3388						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3389						    (num_dests_sacked == 1) &&
3390						    SCTP_TSN_GT(this_sack_lowest_newack,
3391						    tp1->rec.data.TSN_seq)) {
3392							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3393								sctp_log_fr(32 + num_dests_sacked,
3394								    tp1->rec.data.TSN_seq,
3395								    tp1->sent,
3396								    SCTP_FR_LOG_STRIKE_CHUNK);
3397							}
3398							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3399								tp1->sent++;
3400							}
3401						}
3402					}
3403				}
3404			}
3405			/*
3406			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3407			 * algo covers HTNA.
3408			 */
3409		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3410		    biggest_tsn_newly_acked)) {
3411			/*
3412			 * We don't strike these: This is the HTNA
3413			 * algorithm, i.e. we don't strike if our TSN is
3414			 * larger than the Highest TSN Newly Acked.
3415			 */
3416			;
3417		} else {
3418			/* Strike the TSN */
3419			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3420				sctp_log_fr(biggest_tsn_newly_acked,
3421				    tp1->rec.data.TSN_seq,
3422				    tp1->sent,
3423				    SCTP_FR_LOG_STRIKE_CHUNK);
3424			}
3425			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3426				tp1->sent++;
3427			}
3428			if ((asoc->sctp_cmt_on_off > 0) &&
3429			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3430				/*
3431				 * CMT DAC algorithm: If SACK flag is set to
3432				 * 0, then lowest_newack test will not pass
3433				 * because it would have been set to the
3434				 * cumack earlier. If not already to be
3435				 * rtx'd, if not a mixed sack, and if tp1 is
3436				 * not between two sacked TSNs, then mark by
3437				 * one more. NOTE that we are marking by one
3438				 * additional time since the SACK DAC flag
3439				 * indicates that two packets have been
3440				 * received after this missing TSN.
3441				 */
3442				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3443				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3444					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3445						sctp_log_fr(48 + num_dests_sacked,
3446						    tp1->rec.data.TSN_seq,
3447						    tp1->sent,
3448						    SCTP_FR_LOG_STRIKE_CHUNK);
3449					}
3450					tp1->sent++;
3451				}
3452			}
3453		}
3454		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3455			struct sctp_nets *alt;
3456
3457			/* fix counts and things */
3458			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3459				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3460				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3461				    tp1->book_size,
3462				    (uintptr_t) tp1->whoTo,
3463				    tp1->rec.data.TSN_seq);
3464			}
3465			if (tp1->whoTo) {
3466				tp1->whoTo->net_ack++;
3467				sctp_flight_size_decrease(tp1);
3468				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3469					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3470					    tp1);
3471				}
3472			}
3473			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3474				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3475				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3476			}
3477			/* add back to the rwnd */
3478			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3479
3480			/* remove from the total flight */
3481			sctp_total_flight_decrease(stcb, tp1);
3482
3483			if ((stcb->asoc.peer_supports_prsctp) &&
3484			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3485				/*
3486				 * Has it been retransmitted tv_sec times? -
3487				 * we store the retran count there.
3488				 */
3489				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3490					/* Yes, so drop it */
3491					if (tp1->data != NULL) {
3492						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3493						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3494						    SCTP_SO_NOT_LOCKED);
3495					}
3496					/* Make sure to flag we had a FR */
3497					tp1->whoTo->net_ack++;
3498					continue;
3499				}
3500			}
3501			/* printf("OK, we are now ready to FR this guy\n"); */
3502			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3503				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3504				    0, SCTP_FR_MARKED);
3505			}
3506			if (strike_flag) {
3507				/* This is a subsequent FR */
3508				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3509			}
3510			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3511			if (asoc->sctp_cmt_on_off > 0) {
3512				/*
3513				 * CMT: Using RTX_SSTHRESH policy for CMT.
3514				 * If CMT is being used, then pick dest with
3515				 * largest ssthresh for any retransmission.
3516				 */
3517				tp1->no_fr_allowed = 1;
3518				alt = tp1->whoTo;
3519				/* sa_ignore NO_NULL_CHK */
3520				if (asoc->sctp_cmt_pf > 0) {
3521					/*
3522					 * JRS 5/18/07 - If CMT PF is on,
3523					 * use the PF version of
3524					 * find_alt_net()
3525					 */
3526					alt = sctp_find_alternate_net(stcb, alt, 2);
3527				} else {
3528					/*
3529					 * JRS 5/18/07 - If only CMT is on,
3530					 * use the CMT version of
3531					 * find_alt_net()
3532					 */
3533					/* sa_ignore NO_NULL_CHK */
3534					alt = sctp_find_alternate_net(stcb, alt, 1);
3535				}
3536				if (alt == NULL) {
3537					alt = tp1->whoTo;
3538				}
3539				/*
3540				 * CUCv2: If a different dest is picked for
3541				 * the retransmission, then new
3542				 * (rtx-)pseudo_cumack needs to be tracked
3543				 * for orig dest. Let CUCv2 track new (rtx-)
3544				 * pseudo-cumack always.
3545				 */
3546				if (tp1->whoTo) {
3547					tp1->whoTo->find_pseudo_cumack = 1;
3548					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3549				}
3550			} else {/* CMT is OFF */
3551
3552#ifdef SCTP_FR_TO_ALTERNATE
3553				/* Can we find an alternate? */
3554				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3555#else
3556				/*
3557				 * default behavior is to NOT retransmit
3558				 * FR's to an alternate. Armando Caro's
3559				 * paper details why.
3560				 */
3561				alt = tp1->whoTo;
3562#endif
3563			}
3564
3565			tp1->rec.data.doing_fast_retransmit = 1;
3566			tot_retrans++;
3567			/* mark the sending seq for possible subsequent FR's */
3568			/*
3569			 * printf("Marking TSN for FR new value %x\n",
3570			 * (uint32_t)tp1->rec.data.TSN_seq);
3571			 */
3572			if (TAILQ_EMPTY(&asoc->send_queue)) {
3573				/*
3574				 * If the send queue is empty then sending_seq
3575				 * is the next sequence number that will be
3576				 * assigned, so it marks the point just past
3577				 * the last one we sent.
3578				 */
3579				tp1->rec.data.fast_retran_tsn = sending_seq;
3580			} else {
3581				/*
3582				 * If there are chunks on the send queue
3583				 * (unsent data that has made it from the
3584				 * stream queues but not out the door), we
3585				 * take the first one (which will have the
3586				 * lowest TSN); it marks the point just past
3587				 * the last one we sent.
3588				 */
3589				struct sctp_tmit_chunk *ttt;
3590
3591				ttt = TAILQ_FIRST(&asoc->send_queue);
3592				tp1->rec.data.fast_retran_tsn =
3593				    ttt->rec.data.TSN_seq;
3594			}
3595
3596			if (tp1->do_rtt) {
3597				/*
3598				 * this guy had an RTO calculation pending on
3599				 * it, cancel it
3600				 */
3601				if (tp1->whoTo->rto_needed == 0) {
3602					tp1->whoTo->rto_needed = 1;
3603				}
3604				tp1->do_rtt = 0;
3605			}
3606			if (alt != tp1->whoTo) {
3607				/* yes, there is an alternate. */
3608				sctp_free_remote_addr(tp1->whoTo);
3609				/* sa_ignore FREED_MEMORY */
3610				tp1->whoTo = alt;
3611				atomic_add_int(&alt->ref_count, 1);
3612			}
3613		}
3614	}
3615}
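
/*
 * Illustrative sketch (not part of this file): in the marking loop above,
 * a chunk's "sent" field doubles as its strike counter. The hypothetical
 * helper below restates that pattern in isolation.
 */
#if 0
static void
example_strike(struct sctp_tmit_chunk *tp1)
{
	if (tp1->sent < SCTP_DATAGRAM_RESEND) {
		/* One more SACK reported this TSN as missing. */
		tp1->sent++;
	}
	if (tp1->sent == SCTP_DATAGRAM_RESEND) {
		/* Enough strikes: the chunk is now fast-retransmit bound. */
	}
}
#endif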
3616
3617struct sctp_tmit_chunk *
3618sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3619    struct sctp_association *asoc)
3620{
3621	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3622	struct timeval now;
3623	int now_filled = 0;
3624
3625	if (asoc->peer_supports_prsctp == 0) {
3626		return (NULL);
3627	}
3628	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3629		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3630		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3631			/* no chance to advance, out of here */
3632			break;
3633		}
3634		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3635			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3636				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3637				    asoc->advanced_peer_ack_point,
3638				    tp1->rec.data.TSN_seq, 0, 0);
3639			}
3640		}
3641		if (!PR_SCTP_ENABLED(tp1->flags)) {
3642			/*
3643			 * We can't fwd-tsn past any that are reliable, aka
3644			 * retransmitted until the asoc fails.
3645			 */
3646			break;
3647		}
3648		if (!now_filled) {
3649			(void)SCTP_GETTIME_TIMEVAL(&now);
3650			now_filled = 1;
3651		}
3652		/*
3653		 * Now we have a chunk which is marked for another
3654		 * retransmission to a PR-stream, but which has maybe run out
3655		 * of its chances already OR has been marked to be skipped now.
3656		 * Can we skip it if it's a resend?
3657		 */
3658		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3659		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3660			/*
3661			 * Now is this one marked for resend and its time is
3662			 * now up?
3663			 */
3664			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3665				/* Yes so drop it */
3666				if (tp1->data) {
3667					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3668					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3669					    SCTP_SO_NOT_LOCKED);
3670				}
3671			} else {
3672				/*
3673				 * No, we are done when we hit one marked for
3674				 * resend whose time has not expired.
3675				 */
3676				break;
3677			}
3678		}
3679		/*
3680		 * Ok, now if this chunk is marked to be dropped we can clean
3681		 * up the chunk, advance our peer ack point, and check the
3682		 * next chunk.
3683		 */
3684		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3685			/* advance PeerAckPoint goes forward */
3686			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3687				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3688				a_adv = tp1;
3689			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3690				/* No update but we do save the chk */
3691				a_adv = tp1;
3692			}
3693		} else {
3694			/*
3695			 * If it is still in RESEND we can advance no
3696			 * further
3697			 */
3698			break;
3699		}
3700	}
3701	return (a_adv);
3702}
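
/*
 * Illustrative caller sketch (mirrors the PR-SCTP step in the SACK
 * handlers below): the returned chunk and the updated
 * advanced_peer_ack_point drive the decision to bundle a FWD-TSN.
 */
#if 0
	old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
	lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
	if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack) &&
	    SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
		send_forward_tsn(stcb, asoc);
	}
#endif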
3703
3704static int
3705sctp_fs_audit(struct sctp_association *asoc)
3706{
3707	struct sctp_tmit_chunk *chk;
3708	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3709	int entry_flight, entry_cnt, ret;
3710
3711	entry_flight = asoc->total_flight;
3712	entry_cnt = asoc->total_flight_count;
3713	ret = 0;
3714
3715	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3716		return (0);
3717
3718	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3719		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3720			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3721			    chk->rec.data.TSN_seq,
3722			    chk->send_size,
3723			    chk->snd_count
3724			    );
3725			inflight++;
3726		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3727			resend++;
3728		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3729			inbetween++;
3730		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3731			above++;
3732		} else {
3733			acked++;
3734		}
3735	}
3736
3737	if ((inflight > 0) || (inbetween > 0)) {
3738#ifdef INVARIANTS
3739		panic("Flight size-express incorrect? \n");
3740#else
3741		printf("asoc->total_flight:%d cnt:%d\n",
3742		    entry_flight, entry_cnt);
3743
3744		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3745		    inflight, inbetween, resend, above, acked);
3746		ret = 1;
3747#endif
3748	}
3749	return (ret);
3750}
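
/*
 * Illustrative caller sketch (mirrors the recovery step in the SACK
 * handlers below): when the audit trips, flight accounting is rebuilt
 * from the sent queue.
 */
#if 0
	if (sctp_fs_audit(asoc)) {
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			}
		}
	}
#endif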
3751
3752
3753static void
3754sctp_window_probe_recovery(struct sctp_tcb *stcb,
3755    struct sctp_association *asoc,
3756    struct sctp_tmit_chunk *tp1)
3757{
3758	tp1->window_probe = 0;
3759	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3760		/* TSN's skipped we do NOT move back. */
3761		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3762		    tp1->whoTo->flight_size,
3763		    tp1->book_size,
3764		    (uintptr_t) tp1->whoTo,
3765		    tp1->rec.data.TSN_seq);
3766		return;
3767	}
3768	/* First setup this by shrinking flight */
3769	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3770		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3771		    tp1);
3772	}
3773	sctp_flight_size_decrease(tp1);
3774	sctp_total_flight_decrease(stcb, tp1);
3775	/* Now mark for resend */
3776	tp1->sent = SCTP_DATAGRAM_RESEND;
3777	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3778
3779	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3780		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3781		    tp1->whoTo->flight_size,
3782		    tp1->book_size,
3783		    (uintptr_t) tp1->whoTo,
3784		    tp1->rec.data.TSN_seq);
3785	}
3786}
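
/*
 * Illustrative caller sketch (mirrors the window-probe recovery step in
 * the SACK handlers below): when the peer's window reopens, the first
 * chunk that was used as a window probe is moved back for resend.
 */
#if 0
	if (win_probe_recovery && net->window_probe) {
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->window_probe) {
				sctp_window_probe_recovery(stcb, asoc, tp1);
				break;
			}
		}
	}
#endif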
3787
3788void
3789sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3790    uint32_t rwnd, int *abort_now, int ecne_seen)
3791{
3792	struct sctp_nets *net;
3793	struct sctp_association *asoc;
3794	struct sctp_tmit_chunk *tp1, *tp2;
3795	uint32_t old_rwnd;
3796	int win_probe_recovery = 0;
3797	int win_probe_recovered = 0;
3798	int j, done_once = 0;
3799	int rto_ok = 1;
3800
3801	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3802		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3803		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3804	}
3805	SCTP_TCB_LOCK_ASSERT(stcb);
3806#ifdef SCTP_ASOCLOG_OF_TSNS
3807	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3808	stcb->asoc.cumack_log_at++;
3809	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
3810		stcb->asoc.cumack_log_at = 0;
3811	}
3812#endif
3813	asoc = &stcb->asoc;
3814	old_rwnd = asoc->peers_rwnd;
3815	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3816		/* old ack */
3817		return;
3818	} else if (asoc->last_acked_seq == cumack) {
3819		/* Window update sack */
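		/*
		 * Example (illustrative numbers): with rwnd = 64000,
		 * total_flight = 12000, total_flight_count = 10 and a
		 * per-chunk overhead of 256 bytes, the usable peer window
		 * becomes 64000 - (12000 + 10 * 256) = 49440 bytes.
		 */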
3820		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3821		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3822		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3823			/* SWS sender side engages */
3824			asoc->peers_rwnd = 0;
3825		}
3826		if (asoc->peers_rwnd > old_rwnd) {
3827			goto again;
3828		}
3829		return;
3830	}
3831	/* First setup for CC stuff */
3832	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3833		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3834			/* Drag along the window_tsn for cwr's */
3835			net->cwr_window_tsn = cumack;
3836		}
3837		net->prev_cwnd = net->cwnd;
3838		net->net_ack = 0;
3839		net->net_ack2 = 0;
3840
3841		/*
3842		 * CMT: Reset CUC and Fast recovery algo variables before
3843		 * SACK processing
3844		 */
3845		net->new_pseudo_cumack = 0;
3846		net->will_exit_fast_recovery = 0;
3847		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3848			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3849		}
3850	}
3851	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3852		uint32_t send_s;
3853
3854		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3855			tp1 = TAILQ_LAST(&asoc->sent_queue,
3856			    sctpchunk_listhead);
3857			send_s = tp1->rec.data.TSN_seq + 1;
3858		} else {
3859			send_s = asoc->sending_seq;
3860		}
3861		if (SCTP_TSN_GE(cumack, send_s)) {
3862#ifndef INVARIANTS
3863			struct mbuf *oper;
3864
3865#endif
3866#ifdef INVARIANTS
3867			panic("Impossible sack 1");
3868#else
3869
3870			*abort_now = 1;
3871			/* XXX */
3872			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3873			    0, M_DONTWAIT, 1, MT_DATA);
3874			if (oper) {
3875				struct sctp_paramhdr *ph;
3876				uint32_t *ippp;
3877
3878				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3879				    sizeof(uint32_t);
3880				ph = mtod(oper, struct sctp_paramhdr *);
3881				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3882				ph->param_length = htons(SCTP_BUF_LEN(oper));
3883				ippp = (uint32_t *) (ph + 1);
3884				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3885			}
3886			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3887			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3888			return;
3889#endif
3890		}
3891	}
3892	asoc->this_sack_highest_gap = cumack;
3893	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3894		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3895		    stcb->asoc.overall_error_count,
3896		    0,
3897		    SCTP_FROM_SCTP_INDATA,
3898		    __LINE__);
3899	}
3900	stcb->asoc.overall_error_count = 0;
3901	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3902		/* process the new consecutive TSN first */
3903		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3904			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3905				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3906					printf("Warning, an unsent is now acked?\n");
3907				}
3908				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3909					/*
3910					 * If it is less than ACKED, it is
3911					 * now no longer in flight. Higher
3912					 * values may occur during marking
3913					 */
3914					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3915						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3916							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3917							    tp1->whoTo->flight_size,
3918							    tp1->book_size,
3919							    (uintptr_t) tp1->whoTo,
3920							    tp1->rec.data.TSN_seq);
3921						}
3922						sctp_flight_size_decrease(tp1);
3923						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3924							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3925							    tp1);
3926						}
3927						/* sa_ignore NO_NULL_CHK */
3928						sctp_total_flight_decrease(stcb, tp1);
3929					}
3930					tp1->whoTo->net_ack += tp1->send_size;
3931					if (tp1->snd_count < 2) {
3932						/*
3933					 * True non-retransmitted
3934						 * chunk
3935						 */
3936						tp1->whoTo->net_ack2 +=
3937						    tp1->send_size;
3938
3939						/* update RTO too? */
3940						if (tp1->do_rtt) {
3941							if (rto_ok) {
3942								tp1->whoTo->RTO =
3943								/* sa_ignore NO_NULL_CHK */
3948								    sctp_calculate_rto(stcb,
3949								    asoc, tp1->whoTo,
3950								    &tp1->sent_rcv_time,
3951								    sctp_align_safe_nocopy,
3952								    SCTP_RTT_FROM_DATA);
3953								rto_ok = 0;
3954							}
3955							if (tp1->whoTo->rto_needed == 0) {
3956								tp1->whoTo->rto_needed = 1;
3957							}
3958							tp1->do_rtt = 0;
3959						}
3960					}
3961					/*
3962					 * CMT: CUCv2 algorithm. From the
3963					 * cumack'd TSNs, for each TSN being
3964					 * acked for the first time, set the
3965					 * following variables for the
3966					 * corresp destination.
3967					 * new_pseudo_cumack will trigger a
3968					 * cwnd update.
3969					 * find_(rtx_)pseudo_cumack will
3970					 * trigger search for the next
3971					 * expected (rtx-)pseudo-cumack.
3972					 */
3973					tp1->whoTo->new_pseudo_cumack = 1;
3974					tp1->whoTo->find_pseudo_cumack = 1;
3975					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3976
3977					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3978						/* sa_ignore NO_NULL_CHK */
3979						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3980					}
3981				}
3982				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3983					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3984				}
3985				if (tp1->rec.data.chunk_was_revoked) {
3986					/* deflate the cwnd */
3987					tp1->whoTo->cwnd -= tp1->book_size;
3988					tp1->rec.data.chunk_was_revoked = 0;
3989				}
3990				tp1->sent = SCTP_DATAGRAM_ACKED;
3991				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3992				if (tp1->data) {
3993					/* sa_ignore NO_NULL_CHK */
3994					sctp_free_bufspace(stcb, asoc, tp1, 1);
3995					sctp_m_freem(tp1->data);
3996					tp1->data = NULL;
3997				}
3998				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3999					sctp_log_sack(asoc->last_acked_seq,
4000					    cumack,
4001					    tp1->rec.data.TSN_seq,
4002					    0,
4003					    0,
4004					    SCTP_LOG_FREE_SENT);
4005				}
4006				asoc->sent_queue_cnt--;
4007				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4008			} else {
4009				break;
4010			}
4011		}
4012
4013	}
4014	/* sa_ignore NO_NULL_CHK */
4015	if (stcb->sctp_socket) {
4016#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4017		struct socket *so;
4018
4019#endif
4020		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4021		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4022			/* sa_ignore NO_NULL_CHK */
4023			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4024		}
4025#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4026		so = SCTP_INP_SO(stcb->sctp_ep);
4027		atomic_add_int(&stcb->asoc.refcnt, 1);
4028		SCTP_TCB_UNLOCK(stcb);
4029		SCTP_SOCKET_LOCK(so, 1);
4030		SCTP_TCB_LOCK(stcb);
4031		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4032		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4033			/* assoc was freed while we were unlocked */
4034			SCTP_SOCKET_UNLOCK(so, 1);
4035			return;
4036		}
4037#endif
4038		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4039#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4040		SCTP_SOCKET_UNLOCK(so, 1);
4041#endif
4042	} else {
4043		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4044			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4045		}
4046	}
4047
4048	/* JRS - Use the congestion control given in the CC module */
4049	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4050		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4051			if (net->net_ack2 > 0) {
4052				/*
4053				 * Karn's rule applies to clearing error
4054				 * count; this is optional.
4055				 */
4056				net->error_count = 0;
4057				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4058					/* addr came good */
4059					net->dest_state |= SCTP_ADDR_REACHABLE;
4060					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4061					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4062				}
4063				if (net == stcb->asoc.primary_destination) {
4064					if (stcb->asoc.alternate) {
4065						/*
4066						 * release the alternate,
4067						 * primary is good
4068						 */
4069						sctp_free_remote_addr(stcb->asoc.alternate);
4070						stcb->asoc.alternate = NULL;
4071					}
4072				}
4073				if (net->dest_state & SCTP_ADDR_PF) {
4074					net->dest_state &= ~SCTP_ADDR_PF;
4075					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4076					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4077					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4078					/* Done with this net */
4079					net->net_ack = 0;
4080				}
4081				/* restore any doubled timers */
4082				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4083				if (net->RTO < stcb->asoc.minrto) {
4084					net->RTO = stcb->asoc.minrto;
4085				}
4086				if (net->RTO > stcb->asoc.maxrto) {
4087					net->RTO = stcb->asoc.maxrto;
4088				}
4089			}
4090		}
4091		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4092	}
4093	asoc->last_acked_seq = cumack;
4094
4095	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4096		/* nothing left in-flight */
4097		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4098			net->flight_size = 0;
4099			net->partial_bytes_acked = 0;
4100		}
4101		asoc->total_flight = 0;
4102		asoc->total_flight_count = 0;
4103	}
4104	/* RWND update */
4105	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4106	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4107	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4108		/* SWS sender side engages */
4109		asoc->peers_rwnd = 0;
4110	}
4111	if (asoc->peers_rwnd > old_rwnd) {
4112		win_probe_recovery = 1;
4113	}
4114	/* Now assure a timer where data is queued at */
4115again:
4116	j = 0;
4117	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4118		int to_ticks;
4119
4120		if (win_probe_recovery && (net->window_probe)) {
4121			win_probe_recovered = 1;
4122			/*
4123			 * Find the first chunk that was used with a window
4124			 * probe and clear its sent mark
4125			 */
4126			/* sa_ignore FREED_MEMORY */
4127			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4128				if (tp1->window_probe) {
4129					/* move back to data send queue */
4130					sctp_window_probe_recovery(stcb, asoc, tp1);
4131					break;
4132				}
4133			}
4134		}
4135		if (net->RTO == 0) {
4136			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4137		} else {
4138			to_ticks = MSEC_TO_TICKS(net->RTO);
4139		}
4140		if (net->flight_size) {
4141			j++;
4142			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4143			    sctp_timeout_handler, &net->rxt_timer);
4144			if (net->window_probe) {
4145				net->window_probe = 0;
4146			}
4147		} else {
4148			if (net->window_probe) {
4149				/*
4150				 * In window probes we must assure a timer
4151				 * is still running there
4152				 */
4153				net->window_probe = 0;
4154				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4155					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4156					    sctp_timeout_handler, &net->rxt_timer);
4157				}
4158			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4159				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4160				    stcb, net,
4161				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4162			}
4163		}
4164	}
4165	if ((j == 0) &&
4166	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4167	    (asoc->sent_queue_retran_cnt == 0) &&
4168	    (win_probe_recovered == 0) &&
4169	    (done_once == 0)) {
4170		/*
4171		 * huh, this should not happen unless all packets are
4172		 * PR-SCTP and marked to skip of course.
4173		 */
4174		if (sctp_fs_audit(asoc)) {
4175			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4176				net->flight_size = 0;
4177			}
4178			asoc->total_flight = 0;
4179			asoc->total_flight_count = 0;
4180			asoc->sent_queue_retran_cnt = 0;
4181			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4182				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4183					sctp_flight_size_increase(tp1);
4184					sctp_total_flight_increase(stcb, tp1);
4185				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4186					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4187				}
4188			}
4189		}
4190		done_once = 1;
4191		goto again;
4192	}
4193	/**********************************/
4194	/* Now what about shutdown issues */
4195	/**********************************/
4196	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4197		/* nothing left on sendqueue.. consider done */
4198		/* clean up */
4199		if ((asoc->stream_queue_cnt == 1) &&
4200		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4201		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4202		    (asoc->locked_on_sending)
4203		    ) {
4204			struct sctp_stream_queue_pending *sp;
4205
4206			/*
4207			 * I may be in a state where we got all across.. but
4208			 * cannot write more due to a shutdown... we abort
4209			 * since the user did not indicate EOR in this case.
4210			 * The sp will be cleaned during free of the asoc.
4211			 */
4212			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4213			    sctp_streamhead);
4214			if ((sp) && (sp->length == 0)) {
4215				/* Let cleanup code purge it */
4216				if (sp->msg_is_complete) {
4217					asoc->stream_queue_cnt--;
4218				} else {
4219					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4220					asoc->locked_on_sending = NULL;
4221					asoc->stream_queue_cnt--;
4222				}
4223			}
4224		}
4225		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4226		    (asoc->stream_queue_cnt == 0)) {
4227			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4228				/* Need to abort here */
4229				struct mbuf *oper;
4230
4231		abort_out_now:
4232				*abort_now = 1;
4233				/* XXX */
4234				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4235				    0, M_DONTWAIT, 1, MT_DATA);
4236				if (oper) {
4237					struct sctp_paramhdr *ph;
4238					uint32_t *ippp;
4239
4240					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4241					    sizeof(uint32_t);
4242					ph = mtod(oper, struct sctp_paramhdr *);
4243					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4244					ph->param_length = htons(SCTP_BUF_LEN(oper));
4245					ippp = (uint32_t *) (ph + 1);
4246					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4247				}
4248				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4249				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4250			} else {
4251				struct sctp_nets *netp;
4252
4253				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4254				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4255					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4256				}
4257				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4258				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4259				sctp_stop_timers_for_shutdown(stcb);
4260				if (asoc->alternate) {
4261					netp = asoc->alternate;
4262				} else {
4263					netp = asoc->primary_destination;
4264				}
4265				sctp_send_shutdown(stcb, netp);
4266				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4267				    stcb->sctp_ep, stcb, netp);
4268				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4269				    stcb->sctp_ep, stcb, netp);
4270			}
4271		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4272		    (asoc->stream_queue_cnt == 0)) {
4273			struct sctp_nets *netp;
4274
4275			if (asoc->alternate) {
4276				netp = asoc->alternate;
4277			} else {
4278				netp = asoc->primary_destination;
4279			}
4280			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4281				goto abort_out_now;
4282			}
4283			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4284			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4285			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4286			sctp_send_shutdown_ack(stcb, netp);
4287			sctp_stop_timers_for_shutdown(stcb);
4288			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4289			    stcb->sctp_ep, stcb, netp);
4290		}
4291	}
4292	/*********************************************/
4293	/* Here we perform PR-SCTP procedures        */
4294	/* (section 4.2)                             */
4295	/*********************************************/
4296	/* C1. update advancedPeerAckPoint */
4297	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4298		asoc->advanced_peer_ack_point = cumack;
4299	}
4300	/* PR-SCTP issues need to be addressed too */
4301	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4302		struct sctp_tmit_chunk *lchk;
4303		uint32_t old_adv_peer_ack_point;
4304
4305		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4306		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4307		/* C3. See if we need to send a Fwd-TSN */
4308		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4309			/*
4310			 * ISSUE with ECN, see FWD-TSN processing.
4311			 */
4312			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4313				send_forward_tsn(stcb, asoc);
4314			} else if (lchk) {
4315				/* try to FR fwd-tsn's that get lost too */
4316				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4317					send_forward_tsn(stcb, asoc);
4318				}
4319			}
4320		}
4321		if (lchk) {
4322			/* Assure a timer is up */
4323			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4324			    stcb->sctp_ep, stcb, lchk->whoTo);
4325		}
4326	}
4327	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4328		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4329		    rwnd,
4330		    stcb->asoc.peers_rwnd,
4331		    stcb->asoc.total_flight,
4332		    stcb->asoc.total_output_queue_size);
4333	}
4334}
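
/*
 * Illustrative dispatch sketch (assumption: this mirrors how the input
 * path chooses between the two handlers): the express path above serves
 * SACKs that carry no gap, nr-gap or duplicate reports, while
 * sctp_handle_sack() below does the full slow-path processing.
 */
#if 0
	if ((num_seg == 0) && (num_nr_seg == 0) && (num_dup == 0)) {
		sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen);
	} else {
		sctp_handle_sack(m, offset_seg, offset_dup, stcb,
		    num_seg, num_nr_seg, num_dup, &abort_now, flags,
		    cum_ack, a_rwnd, ecne_seen);
	}
#endif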
4335
4336void
4337sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4338    struct sctp_tcb *stcb,
4339    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4340    int *abort_now, uint8_t flags,
4341    uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4342{
4343	struct sctp_association *asoc;
4344	struct sctp_tmit_chunk *tp1, *tp2;
4345	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4346	uint16_t wake_him = 0;
4347	uint32_t send_s = 0;
4348	long j;
4349	int accum_moved = 0;
4350	int will_exit_fast_recovery = 0;
4351	uint32_t a_rwnd, old_rwnd;
4352	int win_probe_recovery = 0;
4353	int win_probe_recovered = 0;
4354	struct sctp_nets *net = NULL;
4355	int done_once;
4356	int rto_ok = 1;
4357	uint8_t reneged_all = 0;
4358	uint8_t cmt_dac_flag;
4359
4360	/*
4361	 * we take any chance we can to service our queues since we cannot
4362	 * get awoken when the socket is read from :<
4363	 */
4364	/*
4365	 * Now perform the actual SACK handling: 1) Verify that it is not an
4366	 * old sack, if so discard. 2) If there is nothing left in the send
4367	 * queue (cum-ack is equal to last acked) then you have a duplicate
4368	 * too, update any rwnd change and verify no timers are running.
4369	 * then return. 3) Process any new consecutive data i.e. cum-ack
4370	 * moved process these first and note that it moved. 4) Process any
4371	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4372	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4373	 * sync up flightsizes and things, stop all timers and also check
4374	 * for shutdown_pending state. If so then go ahead and send off the
4375	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4376	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4377	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4378	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4379	 * if in shutdown_recv state.
4380	 */
4381	SCTP_TCB_LOCK_ASSERT(stcb);
4382	/* CMT DAC algo */
4383	this_sack_lowest_newack = 0;
4384	SCTP_STAT_INCR(sctps_slowpath_sack);
4385	last_tsn = cum_ack;
4386	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4387#ifdef SCTP_ASOCLOG_OF_TSNS
4388	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4389	stcb->asoc.cumack_log_at++;
4390	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
4391		stcb->asoc.cumack_log_at = 0;
4392	}
4393#endif
4394	a_rwnd = rwnd;
4395
4396	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4397		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4398		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4399	}
4400	old_rwnd = stcb->asoc.peers_rwnd;
4401	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4402		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4403		    stcb->asoc.overall_error_count,
4404		    0,
4405		    SCTP_FROM_SCTP_INDATA,
4406		    __LINE__);
4407	}
4408	stcb->asoc.overall_error_count = 0;
4409	asoc = &stcb->asoc;
4410	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4411		sctp_log_sack(asoc->last_acked_seq,
4412		    cum_ack,
4413		    0,
4414		    num_seg,
4415		    num_dup,
4416		    SCTP_LOG_NEW_SACK);
4417	}
4418	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4419		uint16_t i;
4420		uint32_t *dupdata, dblock;
4421
4422		for (i = 0; i < num_dup; i++) {
4423			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4424			    sizeof(uint32_t), (uint8_t *) & dblock);
4425			if (dupdata == NULL) {
4426				break;
4427			}
4428			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4429		}
4430	}
4431	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4432		/* reality check */
4433		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4434			tp1 = TAILQ_LAST(&asoc->sent_queue,
4435			    sctpchunk_listhead);
4436			send_s = tp1->rec.data.TSN_seq + 1;
4437		} else {
4438			tp1 = NULL;
4439			send_s = asoc->sending_seq;
4440		}
4441		if (SCTP_TSN_GE(cum_ack, send_s)) {
4442			struct mbuf *oper;
4443
4444			/*
4445			 * no way, we have not even sent this TSN out yet.
4446			 * Peer is hopelessly messed up with us.
4447			 */
4448			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4449			    cum_ack, send_s);
4450			if (tp1) {
4451				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4452				    tp1->rec.data.TSN_seq, tp1);
4453			}
4454	hopeless_peer:
4455			*abort_now = 1;
4456			/* XXX */
4457			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4458			    0, M_DONTWAIT, 1, MT_DATA);
4459			if (oper) {
4460				struct sctp_paramhdr *ph;
4461				uint32_t *ippp;
4462
4463				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4464				    sizeof(uint32_t);
4465				ph = mtod(oper, struct sctp_paramhdr *);
4466				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4467				ph->param_length = htons(SCTP_BUF_LEN(oper));
4468				ippp = (uint32_t *) (ph + 1);
4469				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4470			}
4471			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4472			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4473			return;
4474		}
4475	}
4476	/**********************/
4477	/* 1) check the range */
4478	/**********************/
4479	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4480		/* acking something behind */
4481		return;
4482	}
4483	/* update the Rwnd of the peer */
4484	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4485	    TAILQ_EMPTY(&asoc->send_queue) &&
4486	    (asoc->stream_queue_cnt == 0)) {
4487		/* nothing left on send/sent and strmq */
4488		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4489			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4490			    asoc->peers_rwnd, 0, 0, a_rwnd);
4491		}
4492		asoc->peers_rwnd = a_rwnd;
4493		if (asoc->sent_queue_retran_cnt) {
4494			asoc->sent_queue_retran_cnt = 0;
4495		}
4496		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4497			/* SWS sender side engages */
4498			asoc->peers_rwnd = 0;
4499		}
4500		/* stop any timers */
4501		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4502			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4503			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4504			net->partial_bytes_acked = 0;
4505			net->flight_size = 0;
4506		}
4507		asoc->total_flight = 0;
4508		asoc->total_flight_count = 0;
4509		return;
4510	}
4511	/*
4512	 * We init net_ack and net_ack2 to 0. These are used to track 2
4513	 * things. The total byte count acked is tracked in net_ack AND
4514	 * net_ack2 is used to track the total bytes acked that are
4515	 * unambiguous and were never retransmitted. We track these on a
4516	 * per destination address basis.
4517	 */
4518	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4519		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4520			/* Drag along the window_tsn for cwr's */
4521			net->cwr_window_tsn = cum_ack;
4522		}
4523		net->prev_cwnd = net->cwnd;
4524		net->net_ack = 0;
4525		net->net_ack2 = 0;
4526
4527		/*
4528		 * CMT: Reset CUC and Fast recovery algo variables before
4529		 * SACK processing
4530		 */
4531		net->new_pseudo_cumack = 0;
4532		net->will_exit_fast_recovery = 0;
4533		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4534			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4535		}
4536	}
4537	/* process the new consecutive TSN first */
4538	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4539		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4540			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4541				accum_moved = 1;
4542				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4543					/*
4544					 * If it is less than ACKED, it is
4545					 * now no longer in flight. Higher
4546					 * values may occur during marking
4547					 */
4548					if ((tp1->whoTo->dest_state &
4549					    SCTP_ADDR_UNCONFIRMED) &&
4550					    (tp1->snd_count < 2)) {
4551						/*
4552						 * If there was no retran
4553						 * and the address is
4554						 * un-confirmed and we sent
4555						 * there and are now
4556						 * sacked... it's confirmed,
4557						 * mark it so.
4558						 */
4559						tp1->whoTo->dest_state &=
4560						    ~SCTP_ADDR_UNCONFIRMED;
4561					}
4562					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4563						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4564							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4565							    tp1->whoTo->flight_size,
4566							    tp1->book_size,
4567							    (uintptr_t) tp1->whoTo,
4568							    tp1->rec.data.TSN_seq);
4569						}
4570						sctp_flight_size_decrease(tp1);
4571						sctp_total_flight_decrease(stcb, tp1);
4572						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4573							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4574							    tp1);
4575						}
4576					}
4577					tp1->whoTo->net_ack += tp1->send_size;
4578
4579					/* CMT SFR and DAC algos */
4580					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4581					tp1->whoTo->saw_newack = 1;
4582
4583					if (tp1->snd_count < 2) {
4584						/*
4585						 * True non-retransmitted
4586						 * chunk
4587						 */
4588						tp1->whoTo->net_ack2 +=
4589						    tp1->send_size;
4590
4591						/* update RTO too? */
4592						if (tp1->do_rtt) {
4593							if (rto_ok) {
4594								tp1->whoTo->RTO =
4595								    sctp_calculate_rto(stcb,
4596								    asoc, tp1->whoTo,
4597								    &tp1->sent_rcv_time,
4598								    sctp_align_safe_nocopy,
4599								    SCTP_RTT_FROM_DATA);
4600								rto_ok = 0;
4601							}
4602							if (tp1->whoTo->rto_needed == 0) {
4603								tp1->whoTo->rto_needed = 1;
4604							}
4605							tp1->do_rtt = 0;
4606						}
4607					}
4608					/*
4609					 * CMT: CUCv2 algorithm. From the
4610					 * cumack'd TSNs, for each TSN being
4611					 * acked for the first time, set the
4612					 * following variables for the
4613					 * corresp destination.
4614					 * new_pseudo_cumack will trigger a
4615					 * cwnd update.
4616					 * find_(rtx_)pseudo_cumack will
4617					 * trigger search for the next
4618					 * expected (rtx-)pseudo-cumack.
4619					 */
4620					tp1->whoTo->new_pseudo_cumack = 1;
4621					tp1->whoTo->find_pseudo_cumack = 1;
4622					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4623
4624
4625					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4626						sctp_log_sack(asoc->last_acked_seq,
4627						    cum_ack,
4628						    tp1->rec.data.TSN_seq,
4629						    0,
4630						    0,
4631						    SCTP_LOG_TSN_ACKED);
4632					}
4633					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4634						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4635					}
4636				}
4637				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4638					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4639#ifdef SCTP_AUDITING_ENABLED
4640					sctp_audit_log(0xB3,
4641					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4642#endif
4643				}
4644				if (tp1->rec.data.chunk_was_revoked) {
4645					/* deflate the cwnd */
4646					tp1->whoTo->cwnd -= tp1->book_size;
4647					tp1->rec.data.chunk_was_revoked = 0;
4648				}
4649				tp1->sent = SCTP_DATAGRAM_ACKED;
4650			}
4651		} else {
4652			break;
4653		}
4654	}
4655	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4656	/* always set this up to cum-ack */
4657	asoc->this_sack_highest_gap = last_tsn;
4658
4659	if ((num_seg > 0) || (num_nr_seg > 0)) {
4660
4661		/*
4662		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4663		 * to be greater than the cumack. Also reset saw_newack to 0
4664		 * for all dests.
4665		 */
4666		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4667			net->saw_newack = 0;
4668			net->this_sack_highest_newack = last_tsn;
4669		}
4670
4671		/*
4672		 * thisSackHighestGap will increase while handling NEW
4673		 * segments; this_sack_highest_newack will increase while
4674		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4675		 * used for CMT DAC algo. saw_newack will also change.
4676		 */
4677		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4678		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4679		    num_seg, num_nr_seg, &rto_ok)) {
4680			wake_him++;
4681		}
4682		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4683			/*
4684			 * validate the biggest_tsn_acked in the gap acks if
4685			 * strict adherence is wanted.
4686			 */
4687			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4688				/*
4689				 * peer is either confused or we are under
4690				 * attack. We must abort.
4691				 */
4692				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4693				    biggest_tsn_acked,
4694				    send_s);
4695
4696				goto hopeless_peer;
4697			}
4698		}
4699	}
4700	/*******************************************/
4701	/* cancel ALL T3-send timer if accum moved */
4702	/*******************************************/
4703	if (asoc->sctp_cmt_on_off > 0) {
4704		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4705			if (net->new_pseudo_cumack)
4706				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4707				    stcb, net,
4708				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4709
4710		}
4711	} else {
4712		if (accum_moved) {
4713			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4714				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4715				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4716			}
4717		}
4718	}
4719	/********************************************/
4720	/* drop the acked chunks from the sentqueue */
4721	/********************************************/
4722	asoc->last_acked_seq = cum_ack;
4723
4724	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4725		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4726			break;
4727		}
4728		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4729			/* no more sent on list */
4730		printf("Warning, tp1->sent == %d and it's now acked?\n",
4731			    tp1->sent);
4732		}
4733		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4734		if (tp1->pr_sctp_on) {
4735			if (asoc->pr_sctp_cnt != 0)
4736				asoc->pr_sctp_cnt--;
4737		}
4738		asoc->sent_queue_cnt--;
4739		if (tp1->data) {
4740			/* sa_ignore NO_NULL_CHK */
4741			sctp_free_bufspace(stcb, asoc, tp1, 1);
4742			sctp_m_freem(tp1->data);
4743			tp1->data = NULL;
4744			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4745				asoc->sent_queue_cnt_removeable--;
4746			}
4747		}
4748		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4749			sctp_log_sack(asoc->last_acked_seq,
4750			    cum_ack,
4751			    tp1->rec.data.TSN_seq,
4752			    0,
4753			    0,
4754			    SCTP_LOG_FREE_SENT);
4755		}
4756		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4757		wake_him++;
4758	}
4759	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4760#ifdef INVARIANTS
4761		panic("Warning flight size is positive and should be 0");
4762#else
4763		SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
4764		    asoc->total_flight);
4765#endif
4766		asoc->total_flight = 0;
4767	}
4768	/* sa_ignore NO_NULL_CHK */
4769	if ((wake_him) && (stcb->sctp_socket)) {
4770#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4771		struct socket *so;
4772
4773#endif
4774		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4775		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4776			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4777		}
4778#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4779		so = SCTP_INP_SO(stcb->sctp_ep);
4780		atomic_add_int(&stcb->asoc.refcnt, 1);
4781		SCTP_TCB_UNLOCK(stcb);
4782		SCTP_SOCKET_LOCK(so, 1);
4783		SCTP_TCB_LOCK(stcb);
4784		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4785		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4786			/* assoc was freed while we were unlocked */
4787			SCTP_SOCKET_UNLOCK(so, 1);
4788			return;
4789		}
4790#endif
4791		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4792#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4793		SCTP_SOCKET_UNLOCK(so, 1);
4794#endif
4795	} else {
4796		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4797			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4798		}
4799	}
4800
4801	if (asoc->fast_retran_loss_recovery && accum_moved) {
4802		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4803			/* Setup so we will exit RFC2582 fast recovery */
4804			will_exit_fast_recovery = 1;
4805		}
4806	}
4807	/*
4808	 * Check for revoked fragments:
4809	 *
4810	 * If the previous SACK had no frags then we can't have any revoked.
4811	 * If the previous SACK had frags: if we now have frags (num_seg >
4812	 * 0), call sctp_check_for_revoked() to tell if the peer revoked
4813	 * some of them; else the peer revoked all ACKED fragments, since
4814	 * we had some before and now we have NONE.
4815	 */
4816
4817	if (num_seg) {
4818		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4819		asoc->saw_sack_with_frags = 1;
4820	} else if (asoc->saw_sack_with_frags) {
4821		int cnt_revoked = 0;
4822
4823		/* Peer revoked all dg's marked or acked */
4824		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4825			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4826				tp1->sent = SCTP_DATAGRAM_SENT;
4827				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4828					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4829					    tp1->whoTo->flight_size,
4830					    tp1->book_size,
4831					    (uintptr_t) tp1->whoTo,
4832					    tp1->rec.data.TSN_seq);
4833				}
4834				sctp_flight_size_increase(tp1);
4835				sctp_total_flight_increase(stcb, tp1);
4836				tp1->rec.data.chunk_was_revoked = 1;
4837				/*
4838				 * To ensure that this increase in
4839				 * flightsize, which is artificial, does not
4840				 * throttle the sender, we also increase the
4841				 * cwnd artificially.
4842				 */
4843				tp1->whoTo->cwnd += tp1->book_size;
4844				cnt_revoked++;
4845			}
4846		}
4847		if (cnt_revoked) {
4848			reneged_all = 1;
4849		}
4850		asoc->saw_sack_with_frags = 0;
4851	}
4852	if (num_nr_seg > 0)
4853		asoc->saw_sack_with_nr_frags = 1;
4854	else
4855		asoc->saw_sack_with_nr_frags = 0;
4856
4857	/* JRS - Use the congestion control given in the CC module */
4858	if (ecne_seen == 0) {
4859		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4860			if (net->net_ack2 > 0) {
4861				/*
4862				 * Karn's rule applies to clearing error
4863				 * count; this is optional.
4864				 */
4865				net->error_count = 0;
4866				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4867					/* addr came good */
4868					net->dest_state |= SCTP_ADDR_REACHABLE;
4869					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4870					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4871				}
4872				if (net == stcb->asoc.primary_destination) {
4873					if (stcb->asoc.alternate) {
4874						/*
4875						 * release the alternate,
4876						 * primary is good
4877						 */
4878						sctp_free_remote_addr(stcb->asoc.alternate);
4879						stcb->asoc.alternate = NULL;
4880					}
4881				}
4882				if (net->dest_state & SCTP_ADDR_PF) {
4883					net->dest_state &= ~SCTP_ADDR_PF;
4884					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4885					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4886					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4887					/* Done with this net */
4888					net->net_ack = 0;
4889				}
4890				/* restore any doubled timers */
4891				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4892				if (net->RTO < stcb->asoc.minrto) {
4893					net->RTO = stcb->asoc.minrto;
4894				}
4895				if (net->RTO > stcb->asoc.maxrto) {
4896					net->RTO = stcb->asoc.maxrto;
4897				}
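				/*
				 * A hedged worked example of the restore
				 * above, assuming the usual Van Jacobson
				 * scaling where lastsa holds SRTT <<
				 * SCTP_RTT_SHIFT and lastsv holds the scaled
				 * variance term: with SCTP_RTT_SHIFT == 3,
				 * lastsa == 800 (a 100 ms SRTT) and
				 * lastsv == 40, RTO = (800 >> 3) + 40 = 140,
				 * then clamped into [minrto, maxrto].
				 */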
4898			}
4899		}
4900		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4901	}
4902	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4903		/* nothing left in-flight */
4904		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4905			/* stop all timers */
4906			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4907			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4908			net->flight_size = 0;
4909			net->partial_bytes_acked = 0;
4910		}
4911		asoc->total_flight = 0;
4912		asoc->total_flight_count = 0;
4913	}
4914	/**********************************/
4915	/* Now what about shutdown issues */
4916	/**********************************/
4917	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4918		/* nothing left on sendqueue, consider done */
4919		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4920			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4921			    asoc->peers_rwnd, 0, 0, a_rwnd);
4922		}
4923		asoc->peers_rwnd = a_rwnd;
4924		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4925			/* SWS sender side engages */
4926			asoc->peers_rwnd = 0;
4927		}
4928		/* clean up */
4929		if ((asoc->stream_queue_cnt == 1) &&
4930		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4931		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4932		    (asoc->locked_on_sending)
4933		    ) {
4934			struct sctp_stream_queue_pending *sp;
4935
4936			/*
4937			 * We may be in a state where everything got
4938			 * across, but nothing more can be written due to
4939			 * a shutdown; abort, since the user never indicated EOR.
4940			 */
4941			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4942			    sctp_streamhead);
4943			if ((sp) && (sp->length == 0)) {
4944				asoc->locked_on_sending = NULL;
4945				if (sp->msg_is_complete) {
4946					asoc->stream_queue_cnt--;
4947				} else {
4948					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4949					asoc->stream_queue_cnt--;
4950				}
4951			}
4952		}
4953		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4954		    (asoc->stream_queue_cnt == 0)) {
4955			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4956				/* Need to abort here */
4957				struct mbuf *oper;
4958
4959		abort_out_now:
4960				*abort_now = 1;
4961				/* XXX */
4962				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4963				    0, M_DONTWAIT, 1, MT_DATA);
4964				if (oper) {
4965					struct sctp_paramhdr *ph;
4966					uint32_t *ippp;
4967
4968					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4969					    sizeof(uint32_t);
4970					ph = mtod(oper, struct sctp_paramhdr *);
4971					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4972					ph->param_length = htons(SCTP_BUF_LEN(oper));
4973					ippp = (uint32_t *) (ph + 1);
4974					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4975				}
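#if 0
				/*
				 * A hedged sketch (never compiled) of the
				 * wire image built above: an error cause
				 * carrying the internal location code. The
				 * struct and field names are illustrative
				 * only.
				 */
				struct user_abort_cause_sketch {
					uint16_t param_type;	/* htons(SCTP_CAUSE_USER_INITIATED_ABT) */
					uint16_t param_length;	/* htons(8): header plus one uint32_t */
					uint32_t loc;	/* htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31) */
				};
#endif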
4976				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4977				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4978				return;
4979			} else {
4980				struct sctp_nets *netp;
4981
4982				if (asoc->alternate) {
4983					netp = asoc->alternate;
4984				} else {
4985					netp = asoc->primary_destination;
4986				}
4987				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4988				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4989					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4990				}
4991				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4992				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4993				sctp_stop_timers_for_shutdown(stcb);
4994				sctp_send_shutdown(stcb, netp);
4995				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4996				    stcb->sctp_ep, stcb, netp);
4997				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4998				    stcb->sctp_ep, stcb, netp);
4999			}
5000			return;
5001		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5002		    (asoc->stream_queue_cnt == 0)) {
5003			struct sctp_nets *netp;
5004
5005			if (asoc->alternate) {
5006				netp = asoc->alternate;
5007			} else {
5008				netp = asoc->primary_destination;
5009			}
5010			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5011				goto abort_out_now;
5012			}
5013			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5014			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5015			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5016			sctp_send_shutdown_ack(stcb, netp);
5017			sctp_stop_timers_for_shutdown(stcb);
5018			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5019			    stcb->sctp_ep, stcb, netp);
5020			return;
5021		}
5022	}
5023	/*
5024	 * Now here we are going to recycle net_ack for a different use...
5025	 * HEADS UP.
5026	 */
5027	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5028		net->net_ack = 0;
5029	}
5030
5031	/*
5032	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5033	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5034	 * automatically ensure that.
5035	 */
5036	if ((asoc->sctp_cmt_on_off > 0) &&
5037	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5038	    (cmt_dac_flag == 0)) {
5039		this_sack_lowest_newack = cum_ack;
5040	}
5041	if ((num_seg > 0) || (num_nr_seg > 0)) {
5042		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5043		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5044	}
5045	/* JRS - Use the congestion control given in the CC module */
5046	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5047
5048	/* Now are we exiting loss recovery? */
5049	if (will_exit_fast_recovery) {
5050		/* Ok, we must exit fast recovery */
5051		asoc->fast_retran_loss_recovery = 0;
5052	}
5053	if ((asoc->sat_t3_loss_recovery) &&
5054	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5055		/* end satellite t3 loss recovery */
5056		asoc->sat_t3_loss_recovery = 0;
5057	}
5058	/*
5059	 * CMT Fast recovery
5060	 */
5061	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5062		if (net->will_exit_fast_recovery) {
5063			/* Ok, we must exit fast recovery */
5064			net->fast_retran_loss_recovery = 0;
5065		}
5066	}
5067
5068	/* Adjust and set the new rwnd value */
5069	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5070		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5071		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5072	}
5073	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5074	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5075	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5076		/* SWS sender side engages */
5077		asoc->peers_rwnd = 0;
5078	}
5079	if (asoc->peers_rwnd > old_rwnd) {
5080		win_probe_recovery = 1;
5081	}
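	/*
	 * A hedged numeric example of the computation above: with
	 * a_rwnd == 10000, total_flight == 4000, total_flight_count == 4 and
	 * a per-chunk overhead of 256, the peer is assumed to still hold
	 * 4000 + 4 * 256 == 5024 bytes, so peers_rwnd becomes 4976;
	 * sctp_sbspace_sub() clamps a negative result to 0, and the SWS
	 * check zeroes anything below the sender-side silly-window
	 * threshold.
	 */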
5082	/*
5083	 * Now we must set up a timer for anyone with outstanding
5084	 * data.
5085	 */
5086	done_once = 0;
5087again:
5088	j = 0;
5089	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5090		if (win_probe_recovery && (net->window_probe)) {
5091			win_probe_recovered = 1;
5092			/*-
5093			 * Find the first chunk that was used for a
5094			 * window probe and clear the event. Put it
5095			 * back into the send queue as if it had
5096			 * not been sent.
5097			 */
5098			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5099				if (tp1->window_probe) {
5100					sctp_window_probe_recovery(stcb, asoc, tp1);
5101					break;
5102				}
5103			}
5104		}
5105		if (net->flight_size) {
5106			j++;
5107			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5108				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5109				    stcb->sctp_ep, stcb, net);
5110			}
5111			if (net->window_probe) {
5112				net->window_probe = 0;
5113			}
5114		} else {
5115			if (net->window_probe) {
5116				/*
5117				 * During a window probe we must ensure a
5118				 * timer is still running on that net.
5119				 */
5120				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5121					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5122					    stcb->sctp_ep, stcb, net);
5123
5124				}
5125			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5126				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5127				    stcb, net,
5128				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5129			}
5130		}
5131	}
5132	if ((j == 0) &&
5133	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5134	    (asoc->sent_queue_retran_cnt == 0) &&
5135	    (win_probe_recovered == 0) &&
5136	    (done_once == 0)) {
5137		/*
5138		 * Huh, this should not happen unless all packets are
5139		 * PR-SCTP and marked to be skipped.
5140		 */
5141		if (sctp_fs_audit(asoc)) {
5142			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5143				net->flight_size = 0;
5144			}
5145			asoc->total_flight = 0;
5146			asoc->total_flight_count = 0;
5147			asoc->sent_queue_retran_cnt = 0;
5148			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5149				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5150					sctp_flight_size_increase(tp1);
5151					sctp_total_flight_increase(stcb, tp1);
5152				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5153					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5154				}
5155			}
5156		}
5157		done_once = 1;
5158		goto again;
5159	}
5160	/*********************************************/
5161	/* Here we perform PR-SCTP procedures        */
5162	/* (section 4.2)                             */
5163	/*********************************************/
5164	/* C1. update advancedPeerAckPoint */
5165	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5166		asoc->advanced_peer_ack_point = cum_ack;
5167	}
5168	/* C2. try to further move advancedPeerAckPoint ahead */
5169	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5170		struct sctp_tmit_chunk *lchk;
5171		uint32_t old_adv_peer_ack_point;
5172
5173		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5174		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5175		/* C3. See if we need to send a Fwd-TSN */
5176		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5177			/*
5178			 * ISSUE with ECN, see FWD-TSN processing.
5179			 */
5180			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5181				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5182				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5183				    old_adv_peer_ack_point);
5184			}
5185			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5186				send_forward_tsn(stcb, asoc);
5187			} else if (lchk) {
5188				/* try to FR fwd-tsn's that get lost too */
5189				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5190					send_forward_tsn(stcb, asoc);
5191				}
5192			}
5193		}
5194		if (lchk) {
5195			/* Assure a timer is up */
5196			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5197			    stcb->sctp_ep, stcb, lchk->whoTo);
5198		}
5199	}
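	/*
	 * A hedged worked example of C1-C3: with cum_ack == 100 and
	 * abandoned PR-SCTP chunks 101-103 on the sent queue,
	 * sctp_try_advance_peer_ack_point() moves advanced_peer_ack_point
	 * to 103; since the point now exceeds cum_ack and has moved, a
	 * FORWARD-TSN with new_cumulative_tsn == 103 is sent so the peer
	 * can skip the abandoned data.
	 */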
5200	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5201		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5202		    a_rwnd,
5203		    stcb->asoc.peers_rwnd,
5204		    stcb->asoc.total_flight,
5205		    stcb->asoc.total_output_queue_size);
5206	}
5207}
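
#if 0
/*
 * A minimal, self-contained sketch (never compiled here) of the
 * serial-number comparison used all through this file via SCTP_TSN_GT/GE
 * and SCTP_SSN_GT/GE; the authoritative macros live in sctp_constants.h,
 * so treat the exact definition below as an assumption.
 */
#include <stdint.h>

/* "a is newer than b" on the 32-bit circular TSN space */
static int
tsn_gt_sketch(uint32_t a, uint32_t b)
{
	return (((a < b) && ((b - a) > (1U << 31))) ||
	    ((a > b) && ((a - b) < (1U << 31))));
}
/* e.g., tsn_gt_sketch(5, 0xfffffffeU) is true: 5 is newer across the wrap */
#endif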
5208
5209void
5210sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5211{
5212	/* Copy cum-ack */
5213	uint32_t cum_ack, a_rwnd;
5214
5215	cum_ack = ntohl(cp->cumulative_tsn_ack);
5216	/* Arrange so a_rwnd does NOT change */
5217	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5218
5219	/* Now call the express sack handling */
5220	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5221}
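
/*
 * A hedged example of the arrangement above: a SHUTDOWN chunk carries a
 * cum-ack but no a_rwnd, so one is synthesized. If peers_rwnd is 5000
 * and total_flight is 1000, a_rwnd is passed as 6000; the express
 * handler then recomputes peers_rwnd as a_rwnd minus what remains in
 * flight, so the advertised window is not perturbed by the SHUTDOWN.
 */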
5222
5223static void
5224sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5225    struct sctp_stream_in *strmin)
5226{
5227	struct sctp_queued_to_read *ctl, *nctl;
5228	struct sctp_association *asoc;
5229	uint16_t tt;
5230
5231	asoc = &stcb->asoc;
5232	tt = strmin->last_sequence_delivered;
5233	/*
5234	 * First deliver anything prior to and including the stream
5235	 * sequence number that came in.
5236	 */
5237	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5238		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5239			/* this is deliverable now */
5240			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5241			/* subtract pending on streams */
5242			asoc->size_on_all_streams -= ctl->length;
5243			sctp_ucount_decr(asoc->cnt_on_all_streams);
5244			/* deliver it to at least the delivery-q */
5245			if (stcb->sctp_socket) {
5246				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5247				sctp_add_to_readq(stcb->sctp_ep, stcb,
5248				    ctl,
5249				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5250			}
5251		} else {
5252			/* no more delivery now. */
5253			break;
5254		}
5255	}
5256	/*
5257	 * Now we must deliver things in the queue the normal way, if
5258	 * any are now ready.
5259	 */
5260	tt = strmin->last_sequence_delivered + 1;
5261	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5262		if (tt == ctl->sinfo_ssn) {
5263			/* this is deliverable now */
5264			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5265			/* subtract pending on streams */
5266			asoc->size_on_all_streams -= ctl->length;
5267			sctp_ucount_decr(asoc->cnt_on_all_streams);
5268			/* deliver it to at least the delivery-q */
5269			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5270			if (stcb->sctp_socket) {
5271				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5272				sctp_add_to_readq(stcb->sctp_ep, stcb,
5273				    ctl,
5274				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5275
5276			}
5277			tt = strmin->last_sequence_delivered + 1;
5278		} else {
5279			break;
5280		}
5281	}
5282}
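
#if 0
/*
 * A toy, self-contained sketch (never compiled here) of the two-phase
 * delivery above: first flush everything at or before the new cut-off
 * SSN, then keep delivering only while the queue stays consecutive. A
 * sorted array stands in for the inqueue, and plain compares stand in
 * for the serial-number SCTP_SSN_GE.
 */
#include <stdint.h>
#include <stdio.h>

static void
kick_sketch(const uint16_t *ssns, int n, uint16_t cutoff)
{
	int i = 0;
	uint16_t next;

	/* phase 1: deliver anything at or before the cut-off */
	while ((i < n) && (ssns[i] <= cutoff))
		printf("deliver %u\n", (unsigned)ssns[i++]);
	/* phase 2: continue only while SSNs stay consecutive */
	next = cutoff + 1;
	while ((i < n) && (ssns[i] == next)) {
		printf("deliver %u\n", (unsigned)ssns[i++]);
		next++;
	}
}
#endif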
5283
5284static void
5285sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5286    struct sctp_association *asoc,
5287    uint16_t stream, uint16_t seq)
5288{
5289	struct sctp_tmit_chunk *chk, *nchk;
5290
5291	/* For each chunk on here, see if we need to toss it */
5292	/*
5293	 * For now, large messages held on the reasmqueue that are complete
5294	 * will be tossed too. We could in theory do more work: spin through,
5295	 * stop after dumping one message (i.e., on seeing the start of a new
5296	 * message at the head), and call the delivery function to see whether
5297	 * it can be delivered. But for now we just dump everything on the
5298	 * queue.
5299	 */
5300	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5301		/*
5302		 * Do not toss it if it is on a different stream or is
5303		 * marked for unordered delivery, in which case the stream
5304		 * sequence number has no meaning.
5305		 */
5306		if ((chk->rec.data.stream_number != stream) ||
5307		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5308			continue;
5309		}
5310		if (chk->rec.data.stream_seq == seq) {
5311			/* It needs to be tossed */
5312			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5313			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5314				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5315				asoc->str_of_pdapi = chk->rec.data.stream_number;
5316				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5317				asoc->fragment_flags = chk->rec.data.rcv_flags;
5318			}
5319			asoc->size_on_reasm_queue -= chk->send_size;
5320			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5321
5322			/* Clear up any stream problem */
5323			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5324			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5325				/*
5326				 * We must move this stream's sequence
5327				 * number forward if the chunk being
5328				 * skipped is not unordered. There is a
5329				 * chance that, if the peer does not
5330				 * include the last fragment in its
5331				 * FWD-TSN, we WILL have a problem: a
5332				 * partial chunk would sit in the queue
5333				 * and may never be deliverable. Also,
5334				 * if a partial delivery API has started,
5335				 * the user may get a partial chunk and
5336				 * the next read returns a new chunk. Really
5337				 * ugly, but I see no way around it! Maybe a notify??
5338				 */
5339				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5340			}
5341			if (chk->data) {
5342				sctp_m_freem(chk->data);
5343				chk->data = NULL;
5344			}
5345			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5346		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5347			/*
5348			 * If the stream_seq is greater than the one being
5349			 * purged, we are done.
5350			 */
5351			break;
5352		}
5353	}
5354}
5355
5356
5357void
5358sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5359    struct sctp_forward_tsn_chunk *fwd,
5360    int *abort_flag, struct mbuf *m, int offset)
5361{
5362	/* The pr-sctp fwd tsn */
5363	/*
5364	 * Here we perform all the data-receiver-side steps for
5365	 * processing FwdTSN, as required by the PR-SCTP draft.
5366	 *
5367	 * Assume we get FwdTSN(x):
5368	 * 1) update local cumTSN to x
5369	 * 2) try to further advance cumTSN to x + others we have
5370	 * 3) examine and update the re-ordering queue on pr-in-streams
5371	 * 4) clean up the re-assembly queue
5372	 * 5) send a SACK to report where we are
5373	 */
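#if 0
	/*
	 * A hedged reminder (never compiled) of the chunk shape parsed
	 * below; the authoritative definitions live in sctp_header.h.
	 */
	struct forward_tsn_sketch {
		struct sctp_chunkhdr ch;	/* type/flags/length */
		uint32_t new_cumulative_tsn;	/* network byte order */
		/* then zero or more struct sctp_strseq entries: */
		/* { uint16_t stream; uint16_t sequence; } */
	};
#endif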
5374	struct sctp_association *asoc;
5375	uint32_t new_cum_tsn, gap;
5376	unsigned int i, fwd_sz, m_size;
5377	uint32_t str_seq;
5378	struct sctp_stream_in *strm;
5379	struct sctp_tmit_chunk *chk, *nchk;
5380	struct sctp_queued_to_read *ctl, *sv;
5381
5382	asoc = &stcb->asoc;
5383	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5384		SCTPDBG(SCTP_DEBUG_INDATA1,
5385		    "Bad size (too small/big) fwd-tsn\n");
5386		return;
5387	}
5388	m_size = (stcb->asoc.mapping_array_size << 3);
5389	/*************************************************************/
5390	/* 1. Here we update local cumTSN and shift the bitmap array */
5391	/*************************************************************/
5392	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5393
5394	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5395		/* Already got there ... */
5396		return;
5397	}
5398	/*
5399	 * Now that we know the new TSN is more advanced, find the
5400	 * actual gap.
5401	 */
5402	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5403	asoc->cumulative_tsn = new_cum_tsn;
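	/*
	 * A hedged example of the gap just computed: with
	 * mapping_array_base_tsn == 1000 and new_cum_tsn == 1009 the gap is
	 * 9, i.e. the bit index of the new cumulative TSN in the mapping
	 * array; the macro handles TSN wrap-around for us.
	 */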
5404	if (gap >= m_size) {
5405		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5406			struct mbuf *oper;
5407
5408			/*
5409			 * Out of range (even counting single-byte chunks
5410			 * against the rwnd I give out). This must be an attacker.
5411			 */
5412			*abort_flag = 1;
5413			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5414			    0, M_DONTWAIT, 1, MT_DATA);
5415			if (oper) {
5416				struct sctp_paramhdr *ph;
5417				uint32_t *ippp;
5418
5419				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5420				    (sizeof(uint32_t) * 3);
5421				ph = mtod(oper, struct sctp_paramhdr *);
5422				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5423				ph->param_length = htons(SCTP_BUF_LEN(oper));
5424				ippp = (uint32_t *) (ph + 1);
5425				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5426				ippp++;
5427				*ippp = asoc->highest_tsn_inside_map;
5428				ippp++;
5429				*ippp = new_cum_tsn;
5430			}
5431			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5432			sctp_abort_an_association(stcb->sctp_ep, stcb,
5433			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5434			return;
5435		}
5436		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5437
5438		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5439		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5440		asoc->highest_tsn_inside_map = new_cum_tsn;
5441
5442		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5443		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5444
5445		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5446			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5447		}
5448	} else {
5449		SCTP_TCB_LOCK_ASSERT(stcb);
5450		for (i = 0; i <= gap; i++) {
5451			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5452			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5453				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5454				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5455					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5456				}
5457			}
5458		}
5459	}
5460	/*************************************************************/
5461	/* 2. Clear up re-assembly queue                             */
5462	/*************************************************************/
5463	/*
5464	 * First service it if the pd-api is up, just in case we can
5465	 * progress it forward.
5466	 */
5467	if (asoc->fragmented_delivery_inprogress) {
5468		sctp_service_reassembly(stcb, asoc);
5469	}
5470	/* For each chunk on here, see if we need to toss it */
5471	/*
5472	 * For now, large messages held on the reasmqueue that are complete
5473	 * will be tossed too. We could in theory do more work: spin through,
5474	 * stop after dumping one message (i.e., on seeing the start of a new
5475	 * message at the head), and call the delivery function to see whether
5476	 * it can be delivered. But for now we just dump everything on the
5477	 * queue.
5478	 */
5479	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5480		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5481			/* It needs to be tossed */
5482			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5483			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5484				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5485				asoc->str_of_pdapi = chk->rec.data.stream_number;
5486				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5487				asoc->fragment_flags = chk->rec.data.rcv_flags;
5488			}
5489			asoc->size_on_reasm_queue -= chk->send_size;
5490			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5491
5492			/* Clear up any stream problem */
5493			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5494			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5495				/*
5496				 * We must move this stream's sequence
5497				 * number forward if the chunk being
5498				 * skipped is not unordered. There is a
5499				 * chance that, if the peer does not
5500				 * include the last fragment in its
5501				 * FWD-TSN, we WILL have a problem: a
5502				 * partial chunk would sit in the queue
5503				 * and may never be deliverable. Also,
5504				 * if a partial delivery API has started,
5505				 * the user may get a partial chunk and
5506				 * the next read returns a new chunk. Really
5507				 * ugly, but I see no way around it! Maybe a notify??
5508				 */
5509				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5510			}
5511			if (chk->data) {
5512				sctp_m_freem(chk->data);
5513				chk->data = NULL;
5514			}
5515			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5516		} else {
5517			/*
5518			 * Ok we have gone beyond the end of the fwd-tsn's
5519			 * mark.
5520			 */
5521			break;
5522		}
5523	}
5524	/*******************************************************/
5525	/* 3. Update the PR-stream re-ordering queues and fix  */
5526	/* delivery issues as needed.                           */
5527	/*******************************************************/
5528	fwd_sz -= sizeof(*fwd);
5529	if (m && fwd_sz) {
5530		/* New method. */
5531		unsigned int num_str;
5532		struct sctp_strseq *stseq, strseqbuf;
5533
5534		offset += sizeof(*fwd);
5535
5536		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5537		num_str = fwd_sz / sizeof(struct sctp_strseq);
5538		for (i = 0; i < num_str; i++) {
5539			uint16_t st;
5540
5541			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5542			    sizeof(struct sctp_strseq),
5543			    (uint8_t *) & strseqbuf);
5544			offset += sizeof(struct sctp_strseq);
5545			if (stseq == NULL) {
5546				break;
5547			}
5548			/* Convert to host byte order */
5549			st = ntohs(stseq->stream);
5550			stseq->stream = st;
5551			st = ntohs(stseq->sequence);
5552			stseq->sequence = st;
5553
5554			/* now process */
5555
5556			/*
5557			 * OK, now look for the stream/seq on the read
5558			 * queue where it is not all delivered. If we find
5559			 * it, we transmute the read entry into a PDI_ABORTED.
5560			 */
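			/*
			 * Note: the notification payload built below packs
			 * the stream id into the high 16 bits and the SSN
			 * into the low 16 bits of a single uint32_t
			 * (str_seq).
			 */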
5561			if (stseq->stream >= asoc->streamincnt) {
5562				/* screwed up streams, stop!  */
5563				break;
5564			}
5565			if ((asoc->str_of_pdapi == stseq->stream) &&
5566			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5567				/*
5568				 * If this is the one we were partially
5569				 * delivering, then we no longer are. Note
5570				 * that this will change with the reassembly
5571				 * re-write.
5572				 */
5573				asoc->fragmented_delivery_inprogress = 0;
5574			}
5575			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5576			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5577				if ((ctl->sinfo_stream == stseq->stream) &&
5578				    (ctl->sinfo_ssn == stseq->sequence)) {
5579					str_seq = (stseq->stream << 16) | stseq->sequence;
5580					ctl->end_added = 1;
5581					ctl->pdapi_aborted = 1;
5582					sv = stcb->asoc.control_pdapi;
5583					stcb->asoc.control_pdapi = ctl;
5584					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5585					    stcb,
5586					    SCTP_PARTIAL_DELIVERY_ABORTED,
5587					    (void *)&str_seq,
5588					    SCTP_SO_NOT_LOCKED);
5589					stcb->asoc.control_pdapi = sv;
5590					break;
5591				} else if ((ctl->sinfo_stream == stseq->stream) &&
5592				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5593					/* We are past our victim SSN */
5594					break;
5595				}
5596			}
5597			strm = &asoc->strmin[stseq->stream];
5598			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5599				/* Update the sequence number */
5600				strm->last_sequence_delivered = stseq->sequence;
5601			}
5602			/* now kick the stream the new way */
5603			/* sa_ignore NO_NULL_CHK */
5604			sctp_kick_prsctp_reorder_queue(stcb, strm);
5605		}
5606		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5607	}
5608	/*
5609	 * Now slide the mapping arrays forward.
5610	 */
5611	sctp_slide_mapping_arrays(stcb);
5612
5613	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5614		/* now let's kick out and check for more fragmented delivery */
5615		/* sa_ignore NO_NULL_CHK */
5616		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5617	}
5618}
5619