1/*	$NetBSD: sctp_indata.c,v 1.15 2024/02/09 18:20:00 andvar Exp $ */
2/*	$KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	*/
3
4/*
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: sctp_indata.c,v 1.15 2024/02/09 18:20:00 andvar Exp $");
35
36#ifdef _KERNEL_OPT
37#include "opt_ipsec.h"
38#include "opt_inet.h"
39#include "opt_sctp.h"
40#endif /* _KERNEL_OPT */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/socket.h>
47#include <sys/socketvar.h>
48#include <sys/sysctl.h>
49
50#include <net/if.h>
51#include <net/route.h>
52
53
54#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
55#include <sys/limits.h>
56#else
57#include <machine/limits.h>
58#endif
59#include <machine/cpu.h>
60
61#include <netinet/in.h>
62#include <netinet/in_systm.h>
63#include <netinet/ip.h>
64#ifdef INET6
65#include <netinet/ip6.h>
66#endif /* INET6 */
67#include <netinet/in_pcb.h>
68#include <netinet/in_var.h>
69#include <netinet/ip_var.h>
70#ifdef INET6
71#include <netinet6/ip6_var.h>
72#endif /* INET6 */
73#include <netinet/ip_icmp.h>
74#include <netinet/icmp_var.h>
75#include <netinet/sctp_var.h>
76#include <netinet/sctp_pcb.h>
77#include <netinet/sctp_header.h>
78#include <netinet/sctputil.h>
79#include <netinet/sctp_output.h>
80#include <netinet/sctp_input.h>
81#include <netinet/sctp_hashdriver.h>
82#include <netinet/sctp_indata.h>
83#include <netinet/sctp_uio.h>
84#include <netinet/sctp_timer.h>
85#ifdef IPSEC
86#include <netipsec/ipsec.h>
87#include <netipsec/key.h>
88#endif /*IPSEC*/
89
90#ifdef SCTP_DEBUG
91extern u_int32_t sctp_debug_on;
92#endif
93
94/*
95 * NOTES: On the outbound side of things I need to check the sack timer to
96 * see if I should generate a sack into the chunk queue (if I have data to
97 * send that is and will be sending it .. for bundling.
98 *
99 * The callback in sctp_usrreq.c will get called when the socket is read
100 * from. This will cause sctp_service_queues() to get called on the top
101 * entry in the list.
102 */
103
104extern int sctp_strict_sacks;
105
106void
107sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
108{
109	u_int32_t calc, calc_w_oh;
110
111#ifdef SCTP_DEBUG
112	if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
113		printf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
114		       (u_long)stcb->sctp_socket->so_rcv.sb_cc,
115		       (u_long)stcb->sctp_socket->so_rcv.sb_hiwat,
116		       (u_long)stcb->sctp_socket->so_rcv.sb_lowat,
117		       (u_long)stcb->sctp_socket->so_rcv.sb_mbcnt,
118		       (u_long)stcb->sctp_socket->so_rcv.sb_mbmax);
119		printf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
120		       sctp_sbspace(&stcb->sctp_socket->so_rcv),
121		       asoc->size_on_delivery_queue,
122		       asoc->size_on_reasm_queue,
123		       asoc->size_on_all_streams);
124	}
125#endif
126	if (stcb->sctp_socket->so_rcv.sb_cc == 0 &&
127	    asoc->size_on_delivery_queue == 0 &&
128	    asoc->size_on_reasm_queue == 0 &&
129	    asoc->size_on_all_streams == 0) {
130		/* Full rwnd granted */
131		asoc->my_rwnd = uimax(stcb->sctp_socket->so_rcv.sb_hiwat,
132				    SCTP_MINIMAL_RWND);
133		return;
134	}
135	/* get actual space */
136	calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);
137
138	/* take out what has NOT been put on socket queue and
139	 * we yet hold for putting up.
140	 */
141	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
142	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
143	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);
144
145	/* what is the overhead of all these rwnd's */
146	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
147
148	asoc->my_rwnd = calc;
149	if (calc_w_oh == 0) {
150		/* If our overhead is greater than the advertised
151		 * rwnd, we clamp the rwnd to 1. This lets us
152		 * still accept inbound segments, but hopefully will
153		 * shut the sender down when he finally gets the message.
154		 */
155 		asoc->my_rwnd = 1;
156	} else {
157		/* SWS threshold */
158		if (asoc->my_rwnd &&
159		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
160			/* SWS engaged, tell peer none left */
161			asoc->my_rwnd = 1;
162#ifdef SCTP_DEBUG
163			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
164				printf(" - SWS zeros\n");
165			}
166		} else {
167			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
168				printf("\n");
169			}
170#endif
171		}
172	}
173}
174
175/*
176 * Take a chk structure and build it into an mbuf. Hmm should we change things
177 * so that instead we store the data side in a chunk?
178 */
179static struct mbuf *
180sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
181    uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
182{
183	struct sctp_sndrcvinfo *outinfo;
184	struct cmsghdr *cmh;
185	struct mbuf *ret;
186
187	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
188		/* user does not want the sndrcv ctl */
189		return (NULL);
190	}
191
192	MGETHDR(ret, M_DONTWAIT, MT_CONTROL);
193	if (ret == NULL) {
194		/* No space */
195		return (ret);
196	}
197	/* We need a CMSG header followed by the struct  */
198	cmh = mtod(ret, struct cmsghdr *);
199	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
200	cmh->cmsg_level = IPPROTO_SCTP;
201	cmh->cmsg_type = SCTP_SNDRCV;
202	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
203	outinfo->sinfo_stream = stream_no;
204	outinfo->sinfo_ssn = stream_seq;
205	if (flags & SCTP_DATA_UNORDERED) {
206		outinfo->sinfo_flags = SCTP_UNORDERED;
207	} else {
208		outinfo->sinfo_flags = 0;
209	}
210	outinfo->sinfo_ppid = ppid;
211	outinfo->sinfo_context = context;
212	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
213	outinfo->sinfo_tsn = tsn;
214	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
215	ret->m_len = cmh->cmsg_len;
216	ret->m_pkthdr.len = ret->m_len;
217	/*
218	 * We track how many control len's have gone upon the sb
219	 * and do not count these in the rwnd calculation.
220	 */
221	stcb->asoc.my_rwnd_control_len +=
222	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
223
224	return (ret);
225}
226
227/*
228 * Take a chk structure and build it into an mbuf.  Should we change things
229 * so that instead we store the data side in a chunk?
230 */
231static
232struct mbuf *
233sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
234{
235	struct sctp_sndrcvinfo *outinfo;
236	struct cmsghdr *cmh;
237	struct mbuf *ret;
238	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
239		/* user does not want the sndrcv ctl */
240		return (NULL);
241	}
242	MGET(ret, M_DONTWAIT, MT_CONTROL);
243	if (ret == NULL) {
244		/* No space */
245		return (ret);
246	}
247
248	/* We need a CMSG header followed by the struct  */
249	cmh = mtod(ret, struct cmsghdr *);
250	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
251	cmh->cmsg_level = IPPROTO_SCTP;
252	cmh->cmsg_type = SCTP_SNDRCV;
253	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
254	outinfo->sinfo_stream = chk->rec.data.stream_number;
255	outinfo->sinfo_ssn = chk->rec.data.stream_seq;
256	if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
257		outinfo->sinfo_flags = SCTP_UNORDERED;
258	} else {
259		outinfo->sinfo_flags = 0;
260	}
261	outinfo->sinfo_ppid = chk->rec.data.payloadtype;
262	outinfo->sinfo_context = chk->rec.data.context;
263	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
264	outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
265	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
266	ret->m_len = cmh->cmsg_len;
267	stcb->asoc.my_rwnd_control_len +=
268	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
269
270	return (ret);
271}
272
273int
274sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
275    struct sctp_tmit_chunk *chk, int hold_locks)
276{
277	struct mbuf *control, *m;
278	int free_it;
279	struct sockaddr_in6 sin6;
280	const struct sockaddr *to;
281
282#ifdef SCTP_DEBUG
283	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
284		printf("I am now in Deliver data! (%p)\n", chk);
285	}
286#endif
287	/* get a write lock on the inp if not already */
288	if (hold_locks == 0) {
289		SCTP_TCB_UNLOCK(stcb);
290		SCTP_INP_WLOCK(stcb->sctp_ep);
291		SCTP_TCB_LOCK(stcb);
292	}
293	free_it = 0;
294	/* We always add it to the queue */
295	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
296		/* socket above is long gone */
297#ifdef SCTP_DEBUG
298		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
299			printf("gone is gone!\n");
300		}
301#endif
302		if (chk != NULL) {
303			if (chk->data)
304				sctp_m_freem(chk->data);
305			chk->data = NULL;
306			sctp_free_remote_addr(chk->whoTo);
307			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
308			sctppcbinfo.ipi_count_chunk--;
309			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
310				panic("Chunk count is negative");
311			}
312			sctppcbinfo.ipi_gencnt_chunk++;
313		}
314		TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
315			asoc->size_on_delivery_queue -= chk->send_size;
316			asoc->cnt_on_delivery_queue--;
317			/*
318			 * Lose the data pointer, since its in the socket buffer
319			 */
320			if (chk->data)
321				sctp_m_freem(chk->data);
322			chk->data = NULL;
323			/* Now free the address and data */
324			sctp_free_remote_addr(chk->whoTo);
325			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
326			sctppcbinfo.ipi_count_chunk--;
327			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
328				panic("Chunk count is negative");
329			}
330			sctppcbinfo.ipi_gencnt_chunk++;
331		}
332		if (hold_locks == 0) {
333			SCTP_INP_WUNLOCK(stcb->sctp_ep);
334		}
335		return (0);
336	}
337	if (chk != NULL) {
338		TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
339		asoc->size_on_delivery_queue += chk->send_size;
340		asoc->cnt_on_delivery_queue++;
341	}
342	if (asoc->fragmented_delivery_inprogress) {
343		/*
344		 * oh oh, fragmented delivery in progress
345		 * return out of here.
346		 */
347#ifdef SCTP_DEBUG
348		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
349			printf("Fragmented delivery in progress?\n");
350		}
351#endif
352		if (hold_locks == 0) {
353			SCTP_INP_WUNLOCK(stcb->sctp_ep);
354		}
355		return (0);
356	}
357	/* Now grab the first one  */
358	chk = TAILQ_FIRST(&asoc->delivery_queue);
359	if (chk == NULL) {
360		/* Nothing in queue */
361#ifdef SCTP_DEBUG
362		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
363			printf("Nothing in queue?\n");
364		}
365#endif
366		asoc->size_on_delivery_queue = 0;
367		asoc->cnt_on_delivery_queue = 0;
368		if (hold_locks == 0) {
369			SCTP_INP_WUNLOCK(stcb->sctp_ep);
370		}
371		return (0);
372	}
373
374	if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
375		/* Boy, there really is NO room */
376		if (hold_locks == 0) {
377			SCTP_INP_WUNLOCK(stcb->sctp_ep);
378		}
379		return (0);
380	}
381#ifdef SCTP_DEBUG
382	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
383		printf("Now to the delivery with chk(%p)!\n", chk);
384	}
385#endif
386	/* XXX need to append PKTHDR to the socket buffer first */
387	if ((chk->data->m_flags & M_PKTHDR) == 0) {
388		MGETHDR(m, M_DONTWAIT, MT_DATA);
389		if (m == NULL) {
390			/* no room! */
391			if (hold_locks == 0) {
392				SCTP_INP_WUNLOCK(stcb->sctp_ep);
393			}
394			return (0);
395		}
396		m->m_pkthdr.len = chk->send_size;
397		m->m_len = 0;
398		m->m_next = chk->data;
399		chk->data = m;
400	}
401	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
402		if (chk->data->m_next == NULL) {
403			/* hopefully we hit here most of the time */
404			chk->data->m_flags |= M_EOR;
405		} else {
406			/* Add the flag to the LAST mbuf in the chain */
407			m = chk->data;
408			while (m->m_next != NULL) {
409				m = m->m_next;
410			}
411			m->m_flags |= M_EOR;
412		}
413	}
414
415	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
416		struct sockaddr_in6 lsa6;
417
418		control = sctp_build_ctl(stcb, chk);
419		to = rtcache_getdst(&chk->whoTo->ro);
420		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
421		    to->sa_family == AF_INET) {
422			const struct sockaddr_in *sin;
423
424			sin = (const struct sockaddr_in *)to;
425			in6_sin_2_v4mapsin6(sin, &sin6);
426			to = (struct sockaddr *)&sin6;
427		}
428		/* check and strip embedded scope junk */
429		to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
430		    &lsa6);
431		if (((const struct sockaddr_in *)to)->sin_port == 0) {
432			printf("Huh a, port is %d not net:%p %d?\n",
433			       ((const struct sockaddr_in *)to)->sin_port,
434			       chk->whoTo,
435			       (int)(ntohs(stcb->rport)));
436			/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
437			/* XXX */
438		}
439		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
440			/* Gak not enough room */
441			if (control) {
442				sctp_m_freem(control);
443				stcb->asoc.my_rwnd_control_len -=
444				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
445			}
446			goto skip;
447		}
448		if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
449		    to, chk->data, control, stcb->asoc.my_vtag,
450		    stcb->sctp_ep)) {
451			/* Gak not enough room */
452			if (control) {
453				sctp_m_freem(control);
454				stcb->asoc.my_rwnd_control_len -=
455				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
456			}
457		} else {
458			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
459				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
460					stcb->asoc.my_rwnd_control_len +=
461						sizeof(struct mbuf);
462				}
463			} else {
464				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
465			}
466			free_it = 1;
467		}
468	} else {
469		/* append to a already started message. */
470		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
471		    (long)chk->send_size) {
472			sbappend(&stcb->sctp_socket->so_rcv, chk->data);
473			free_it = 1;
474		}
475	}
476 skip:
477	if (hold_locks == 0) {
478		SCTP_INP_WUNLOCK(stcb->sctp_ep);
479	}
480	/* free up the one we inserted */
481	if (free_it) {
482		/* Pull it off the queue */
483#ifdef SCTP_DEBUG
484		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
485			printf("Free_it true, doing tickle wakeup\n");
486		}
487#endif
488		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
489		TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
490		asoc->size_on_delivery_queue -= chk->send_size;
491		asoc->cnt_on_delivery_queue--;
492		/* Lose the data pointer, since its in the socket buffer */
493		chk->data = NULL;
494		/* Now free the address and data */
495		sctp_free_remote_addr(chk->whoTo);
496		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
497		sctppcbinfo.ipi_count_chunk--;
498		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
499			panic("Chunk count is negative");
500		}
501		sctppcbinfo.ipi_gencnt_chunk++;
502	}
503	return (free_it);
504}
505
506/*
507 * We are delivering currently from the reassembly queue. We must continue to
508 * deliver until we either:
509 * 1) run out of space.
510 * 2) run out of sequential TSN's
511 * 3) hit the SCTP_DATA_LAST_FRAG flag.
512 */
513static void
514sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
515{
516	const struct sockaddr *to;
517	struct sockaddr_in6 sin6;
518	struct sctp_tmit_chunk *chk, *at;
519	struct mbuf *control, *m;
520	u_int16_t nxt_todel;
521	u_int16_t stream_no;
522	int cntDel;
523	cntDel = stream_no = 0;
524	if (hold_locks == 0) {
525		/*
526		 * you always have the TCB lock, we need
527		 * to have the inp write lock as well.
528		 */
529		SCTP_TCB_UNLOCK(stcb);
530		SCTP_INP_WLOCK(stcb->sctp_ep);
531		SCTP_TCB_LOCK(stcb);
532	}
533	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
534		/* socket above is long gone */
535		asoc->fragmented_delivery_inprogress = 0;
536		TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
537			asoc->size_on_delivery_queue -= chk->send_size;
538			asoc->cnt_on_delivery_queue--;
539			/*
540			 * Lose the data pointer, since its in the socket buffer
541			 */
542			if (chk->data)
543				sctp_m_freem(chk->data);
544			chk->data = NULL;
545			/* Now free the address and data */
546			sctp_free_remote_addr(chk->whoTo);
547			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
548			sctppcbinfo.ipi_count_chunk--;
549			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
550				panic("Chunk count is negative");
551			}
552			sctppcbinfo.ipi_gencnt_chunk++;
553		}
554		if (hold_locks == 0) {
555			SCTP_INP_WUNLOCK(stcb->sctp_ep);
556		}
557		return;
558	}
559	do {
560		if (stcb->sctp_socket->so_rcv.sb_cc >=
561		    stcb->sctp_socket->so_rcv.sb_hiwat) {
562			if (cntDel) {
563				sctp_sorwakeup(stcb->sctp_ep,
564					       stcb->sctp_socket);
565			}
566			if (hold_locks == 0) {
567				SCTP_INP_WUNLOCK(stcb->sctp_ep);
568			}
569			return;
570		}
571		chk = TAILQ_FIRST(&asoc->reasmqueue);
572		if (chk == NULL) {
573			if (cntDel) {
574				sctp_sorwakeup(stcb->sctp_ep,
575					       stcb->sctp_socket);
576			}
577			if (hold_locks == 0) {
578				SCTP_INP_WUNLOCK(stcb->sctp_ep);
579			}
580			return;
581		}
582		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
583			/* Can't deliver more :< */
584			if (cntDel) {
585				sctp_sorwakeup(stcb->sctp_ep,
586					       stcb->sctp_socket);
587			}
588			if (hold_locks == 0) {
589				SCTP_INP_WUNLOCK(stcb->sctp_ep);
590			}
591			return;
592		}
593		stream_no = chk->rec.data.stream_number;
594		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
595		if (nxt_todel != chk->rec.data.stream_seq &&
596		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
597			/*
598			 * Not the next sequence to deliver in its stream OR
599			 * unordered
600			 */
601			if (cntDel) {
602				sctp_sorwakeup(stcb->sctp_ep,
603					       stcb->sctp_socket);
604			}
605			if (hold_locks == 0) {
606				SCTP_INP_WUNLOCK(stcb->sctp_ep);
607			}
608			return;
609		}
610
611		if ((chk->data->m_flags & M_PKTHDR) == 0) {
612			MGETHDR(m, M_DONTWAIT, MT_DATA);
613			if (m == NULL) {
614				/* no room! */
615				if (hold_locks == 0) {
616					SCTP_INP_WUNLOCK(stcb->sctp_ep);
617				}
618				return;
619			}
620			m->m_pkthdr.len = chk->send_size;
621			m->m_len = 0;
622			m->m_next = chk->data;
623			chk->data = m;
624		}
625		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
626			if (chk->data->m_next == NULL) {
627				/* hopefully we hit here most of the time */
628				chk->data->m_flags |= M_EOR;
629			} else {
630				/* Add the flag to the LAST mbuf in the chain */
631				m = chk->data;
632				while (m->m_next != NULL) {
633					m = m->m_next;
634				}
635				m->m_flags |= M_EOR;
636			}
637		}
638		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
639			struct sockaddr_in6 lsa6;
640
641			control = sctp_build_ctl(stcb, chk);
642			to = rtcache_getdst(&chk->whoTo->ro);
643			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
644			    to->sa_family == AF_INET) {
645				const struct sockaddr_in *sin;
646
647				sin = satocsin(to);
648				in6_sin_2_v4mapsin6(sin, &sin6);
649				to = (struct sockaddr *)&sin6;
650			}
651			/* check and strip embedded scope junk */
652			to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
653								   &lsa6);
654			if (((const struct sockaddr_in *)to)->sin_port == 0) {
655				printf("Huh b, port is %d not net:%p %d?\n",
656				       ((const struct sockaddr_in *)to)->sin_port,
657				       chk->whoTo,
658				       (int)(ntohs(stcb->rport)));
659				/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
660				/* XXX */
661			}
662			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
663			    (long)chk->send_size) {
664				if (control) {
665					sctp_m_freem(control);
666					stcb->asoc.my_rwnd_control_len -=
667						CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
668				}
669				sctp_sorwakeup(stcb->sctp_ep,
670					       stcb->sctp_socket);
671				if (hold_locks == 0) {
672					SCTP_INP_WUNLOCK(stcb->sctp_ep);
673				}
674				return;
675			}
676			if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
677						  to, chk->data, control, stcb->asoc.my_vtag,
678						  stcb->sctp_ep)) {
679				/* Gak not enough room */
680				if (control) {
681					sctp_m_freem(control);
682					stcb->asoc.my_rwnd_control_len -=
683						CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
684				}
685				sctp_sorwakeup(stcb->sctp_ep,
686					       stcb->sctp_socket);
687				if (hold_locks == 0) {
688					SCTP_INP_WUNLOCK(stcb->sctp_ep);
689				}
690				return;
691			}
692			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
693				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
694					stcb->asoc.my_rwnd_control_len +=
695						sizeof(struct mbuf);
696				}
697			} else {
698				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
699			}
700			cntDel++;
701		} else {
702			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
703			    (long)chk->send_size) {
704				sbappend(&stcb->sctp_socket->so_rcv, chk->data);
705				cntDel++;
706			} else {
707				/* out of space in the sb */
708				sctp_sorwakeup(stcb->sctp_ep,
709					       stcb->sctp_socket);
710				if (hold_locks == 0) {
711					SCTP_INP_WUNLOCK(stcb->sctp_ep);
712				}
713				return;
714			}
715		}
716		/* pull it we did it */
717		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
718		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
719			asoc->fragmented_delivery_inprogress = 0;
720			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
721				asoc->strmin[stream_no].last_sequence_delivered++;
722			}
723		}
724		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
725		asoc->size_on_reasm_queue -= chk->send_size;
726		asoc->cnt_on_reasm_queue--;
727		/* free up the chk */
728		sctp_free_remote_addr(chk->whoTo);
729		chk->data = NULL;
730		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
731		sctppcbinfo.ipi_count_chunk--;
732		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
733			panic("Chunk count is negative");
734		}
735		sctppcbinfo.ipi_gencnt_chunk++;
736		if (asoc->fragmented_delivery_inprogress == 0) {
737			/*
738			 * Now lets see if we can deliver the next one on the
739			 * stream
740			 */
741			/*u_int16_t nxt_todel;*/
742			struct sctp_stream_in *strm;
743
744			strm = &asoc->strmin[stream_no];
745			nxt_todel = strm->last_sequence_delivered + 1;
746			chk = TAILQ_FIRST(&strm->inqueue);
747			if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
748				while (chk != NULL) {
749					/* all delivered */
750					if (nxt_todel ==
751					    chk->rec.data.stream_seq) {
752						at = TAILQ_NEXT(chk, sctp_next);
753						TAILQ_REMOVE(&strm->inqueue,
754							     chk, sctp_next);
755						asoc->size_on_all_streams -=
756							chk->send_size;
757						asoc->cnt_on_all_streams--;
758						strm->last_sequence_delivered++;
759						/*
760						 * We ignore the return of
761						 * deliver_data here since we
762						 * always can hold the chunk on
763						 * the d-queue. And we have a
764						 * finite number that can be
765						 * delivered from the strq.
766						 */
767						sctp_deliver_data(stcb, asoc, chk, 1);
768						chk = at;
769					} else {
770						break;
771					}
772					nxt_todel =
773						strm->last_sequence_delivered + 1;
774				}
775			}
776			if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
777				/* Here if deliver_data fails, we must break */
778				if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
779					break;
780			}
781			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
782			if (hold_locks == 0) {
783				SCTP_INP_WUNLOCK(stcb->sctp_ep);
784			}
785			return;
786		}
787		chk = TAILQ_FIRST(&asoc->reasmqueue);
788	} while (chk);
789	if (cntDel) {
790		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
791	}
792	if (hold_locks == 0) {
793		SCTP_INP_WUNLOCK(stcb->sctp_ep);
794	}
795}
796
797/*
798 * Queue the chunk either right into the socket buffer if it is the next one
799 * to go OR put it in the correct place in the delivery queue.  If we do
800 * append to the so_buf, keep doing so until we are out of order.
801 * One big question still remains, what to do when the socket buffer is FULL??
802 */
803static void
804sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
805    struct sctp_tmit_chunk *chk, int *abort_flag)
806{
807	struct sctp_stream_in *strm;
808	struct sctp_tmit_chunk *at;
809	int queue_needed;
810	u_int16_t nxt_todel;
811	struct mbuf *oper;
812
813/*** FIX FIX FIX ???
814 * Need to add code to deal with 16 bit seq wrap
815 * without a TSN wrap for ordered delivery (maybe).
816 * FIX FIX FIX ???
817 */
818	queue_needed = 1;
819	asoc->size_on_all_streams += chk->send_size;
820	asoc->cnt_on_all_streams++;
821	strm = &asoc->strmin[chk->rec.data.stream_number];
822	nxt_todel = strm->last_sequence_delivered + 1;
823#ifdef SCTP_STR_LOGGING
824	sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
825#endif
826#ifdef SCTP_DEBUG
827	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
828		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
829		    (u_int)chk->rec.data.stream_seq,
830		    (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
831	}
832#endif
833	if (compare_with_wrap(strm->last_sequence_delivered,
834	    chk->rec.data.stream_seq, MAX_SEQ) ||
835	    (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
836		/* The incoming sseq is behind where we last delivered? */
837#ifdef SCTP_DEBUG
838		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
839			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort  association\n",
840			    chk->rec.data.stream_seq,
841			    strm->last_sequence_delivered);
842		}
843#endif
844		/*
845		 * throw it in the stream so it gets cleaned up in
846		 * association destruction
847		 */
848		TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
849		MGET(oper, M_DONTWAIT, MT_DATA);
850		if (oper) {
851			struct sctp_paramhdr *ph;
852			u_int32_t *ippp;
853
854			oper->m_len = sizeof(struct sctp_paramhdr) +
855			    sizeof(*ippp);
856			ph = mtod(oper, struct sctp_paramhdr *);
857			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
858			ph->param_length = htons(oper->m_len);
859			ippp = (u_int32_t *)(ph + 1);
860			*ippp = htonl(0x00000001);
861		}
862		sctp_abort_an_association(stcb->sctp_ep, stcb,
863		    SCTP_PEER_FAULTY, oper);
864
865		*abort_flag = 1;
866		return;
867
868	}
869	if (nxt_todel == chk->rec.data.stream_seq) {
870		/* can be delivered right away */
871#ifdef SCTP_DEBUG
872		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
873			printf("It's NEXT!\n");
874		}
875#endif
876#ifdef SCTP_STR_LOGGING
877		sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
878#endif
879		queue_needed = 0;
880		asoc->size_on_all_streams -= chk->send_size;
881		asoc->cnt_on_all_streams--;
882		strm->last_sequence_delivered++;
883		sctp_deliver_data(stcb, asoc, chk, 0);
884		chk = TAILQ_FIRST(&strm->inqueue);
885		while (chk != NULL) {
886			/* all delivered */
887			nxt_todel = strm->last_sequence_delivered + 1;
888			if (nxt_todel == chk->rec.data.stream_seq) {
889				at = TAILQ_NEXT(chk, sctp_next);
890				TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
891				asoc->size_on_all_streams -= chk->send_size;
892				asoc->cnt_on_all_streams--;
893				strm->last_sequence_delivered++;
894				/*
895				 * We ignore the return of deliver_data here
896				 * since we always can hold the chunk on the
897				 * d-queue. And we have a finite number that
898				 * can be delivered from the strq.
899				 */
900#ifdef SCTP_STR_LOGGING
901				sctp_log_strm_del(chk, NULL,
902				    SCTP_STR_LOG_FROM_IMMED_DEL);
903#endif
904				sctp_deliver_data(stcb, asoc, chk, 0);
905				chk = at;
906				continue;
907			}
908			break;
909		}
910	}
911	if (queue_needed) {
912		/*
913		 * Ok, we did not deliver this guy, find
914		 * the correct place to put it on the queue.
915		 */
916#ifdef SCTP_DEBUG
917		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
918			printf("Queue Needed!\n");
919		}
920#endif
921		if (TAILQ_EMPTY(&strm->inqueue)) {
922			/* Empty queue */
923#ifdef SCTP_STR_LOGGING
924			sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
925#endif
926			TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
927		} else {
928			TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
929				if (compare_with_wrap(at->rec.data.stream_seq,
930				    chk->rec.data.stream_seq, MAX_SEQ)) {
931					/*
932					 * one in queue is bigger than the new
933					 * one, insert before this one
934					 */
935#ifdef SCTP_STR_LOGGING
936					sctp_log_strm_del(chk, at,
937					    SCTP_STR_LOG_FROM_INSERT_MD);
938#endif
939					TAILQ_INSERT_BEFORE(at, chk, sctp_next);
940					break;
941				} else if (at->rec.data.stream_seq ==
942				    chk->rec.data.stream_seq) {
943					/*
944					 * Gak, He sent me a duplicate str seq
945					 * number
946					 */
947					/*
948					 * foo bar, I guess I will just free
949					 * this new guy, should we abort too?
950					 * FIX ME MAYBE? Or it COULD be that
951					 * the SSN's have wrapped. Maybe I
952					 * should compare to TSN somehow...
953					 * sigh for now just blow away the
954					 * chunk!
955					 */
956
957					if (chk->data)
958						sctp_m_freem(chk->data);
959					chk->data = NULL;
960					asoc->size_on_all_streams -= chk->send_size;
961					asoc->cnt_on_all_streams--;
962					sctp_pegs[SCTP_DUP_SSN_RCVD]++;
963					sctp_free_remote_addr(chk->whoTo);
964					SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
965					sctppcbinfo.ipi_count_chunk--;
966					if ((int)sctppcbinfo.ipi_count_chunk <
967					    0) {
968						panic("Chunk count is negative");
969					}
970					sctppcbinfo.ipi_gencnt_chunk++;
971					return;
972				} else {
973					if (TAILQ_NEXT(at, sctp_next) == NULL) {
974						/*
975						 * We are at the end, insert it
976						 * after this one
977						 */
978#ifdef SCTP_STR_LOGGING
979						sctp_log_strm_del(chk, at,
980						    SCTP_STR_LOG_FROM_INSERT_TL);
981#endif
982						TAILQ_INSERT_AFTER(&strm->inqueue,
983						    at, chk, sctp_next);
984						break;
985					}
986				}
987			}
988		}
989	} else {
990		/* We delivered some chunks, wake them up */
991
992#ifdef SCTP_DEBUG
993		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
994			printf("Doing WAKEUP!\n");
995		}
996#endif
997		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
998	}
999}
1000
1001/*
1002 * Returns two things: You get the total size of the deliverable parts of the
1003 * first fragmented message on the reassembly queue. And you get a 1 back if
1004 * all of the message is ready or a 0 back if the message is still incomplete
1005 */
1006static int
1007sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
1008{
1009	struct sctp_tmit_chunk *chk;
1010	u_int32_t tsn;
1011
1012	*t_size = 0;
1013	chk = TAILQ_FIRST(&asoc->reasmqueue);
1014	if (chk == NULL) {
1015		/* nothing on the queue */
1016		return (0);
1017	}
1018	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1019		/* Not a first on the queue */
1020		return (0);
1021	}
1022	tsn = chk->rec.data.TSN_seq;
1023	while (chk) {
1024		if (tsn != chk->rec.data.TSN_seq) {
1025			return (0);
1026		}
1027		*t_size += chk->send_size;
1028		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1029			return (1);
1030		}
1031		tsn++;
1032		chk = TAILQ_NEXT(chk, sctp_next);
1033	}
1034	return (0);
1035}
1036
1037/*
1038 * Dump onto the re-assembly queue, in its proper place. After dumping on
1039 * the queue, see if anything can be delivered. If so pull it off (or as much
1040 * as we can. If we run out of space then we must dump what we can and set
1041 * the appropriate flag to say we queued what we could.
1042 */
1043static void
1044sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1045    struct sctp_tmit_chunk *chk, int *abort_flag)
1046{
1047	struct mbuf *oper;
1048	u_int16_t nxt_todel;
1049	u_int32_t cum_ackp1, prev_tsn, post_tsn;
1050	int tsize;
1051	struct sctp_tmit_chunk *at, *prev, *next;
1052
1053	prev = next = NULL;
1054	cum_ackp1 = asoc->tsn_last_delivered + 1;
1055
1056	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
1057		/* This is the first one on the queue */
1058		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
1059		/*
1060		 * we do not check for delivery of anything when
1061		 * only one fragment is here
1062		 */
1063		asoc->size_on_reasm_queue = chk->send_size;
1064		asoc->cnt_on_reasm_queue++;
1065		if (chk->rec.data.TSN_seq == cum_ackp1) {
1066			if (asoc->fragmented_delivery_inprogress == 0  &&
1067			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
1068			    SCTP_DATA_FIRST_FRAG) {
1069				/*
1070				 * An empty queue, no delivery inprogress, we
1071				 * hit the next one and it does NOT have a
1072				 * FIRST fragment mark.
1073				 */
1074#ifdef SCTP_DEBUG
1075				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1076					printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
1077				}
1078#endif
1079				MGET(oper, M_DONTWAIT, MT_DATA);
1080				if (oper) {
1081					struct sctp_paramhdr *ph;
1082					u_int32_t *ippp;
1083
1084					oper->m_len =
1085					    sizeof(struct sctp_paramhdr) +
1086					    sizeof(*ippp);
1087					ph = mtod(oper, struct sctp_paramhdr *);
1088					ph->param_type =
1089					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1090					ph->param_length = htons(oper->m_len);
1091					ippp = (u_int32_t *)(ph + 1);
1092					*ippp = htonl(0x10000001);
1093				}
1094				sctp_abort_an_association(stcb->sctp_ep, stcb,
1095				    SCTP_PEER_FAULTY, oper);
1096				*abort_flag = 1;
1097			} else if (asoc->fragmented_delivery_inprogress &&
1098			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1099				/*
1100				 * We are doing a partial delivery and the NEXT
1101				 * chunk MUST be either the LAST or MIDDLE
1102				 * fragment NOT a FIRST
1103				 */
1104#ifdef SCTP_DEBUG
1105				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1106					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1107				}
1108#endif
1109				MGET(oper, M_DONTWAIT, MT_DATA);
1110				if (oper) {
1111					struct sctp_paramhdr *ph;
1112					u_int32_t *ippp;
1113
1114					oper->m_len =
1115					    sizeof(struct sctp_paramhdr) +
1116					    sizeof(*ippp);
1117					ph = mtod(oper, struct sctp_paramhdr *);
1118					ph->param_type =
1119					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1120					ph->param_length = htons(oper->m_len);
1121					ippp = (u_int32_t *)(ph + 1);
1122					*ippp = htonl(0x10000002);
1123				}
1124				sctp_abort_an_association(stcb->sctp_ep, stcb,
1125				    SCTP_PEER_FAULTY, oper);
1126				*abort_flag = 1;
1127			} else if (asoc->fragmented_delivery_inprogress) {
1128				/* Here we are ok with a MIDDLE or LAST piece */
1129				if (chk->rec.data.stream_number !=
1130				    asoc->str_of_pdapi) {
1131					/* Got to be the right STR No */
1132#ifdef SCTP_DEBUG
1133					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1134						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
1135						    chk->rec.data.stream_number,
1136						    asoc->str_of_pdapi);
1137					}
1138#endif
1139					MGET(oper, M_DONTWAIT, MT_DATA);
1140					if (oper) {
1141						struct sctp_paramhdr *ph;
1142						u_int32_t *ippp;
1143						oper->m_len =
1144						    sizeof(struct sctp_paramhdr) +
1145						    sizeof(*ippp);
1146						ph = mtod(oper,
1147						    struct sctp_paramhdr *);
1148						ph->param_type =
1149						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1150						ph->param_length =
1151						    htons(oper->m_len);
1152						ippp = (u_int32_t *)(ph + 1);
1153						*ippp = htonl(0x10000003);
1154					}
1155					sctp_abort_an_association(stcb->sctp_ep,
1156					    stcb, SCTP_PEER_FAULTY, oper);
1157					*abort_flag = 1;
1158				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1159				    SCTP_DATA_UNORDERED &&
1160				    chk->rec.data.stream_seq !=
1161				    asoc->ssn_of_pdapi) {
1162					/* Got to be the right STR Seq */
1163#ifdef SCTP_DEBUG
1164					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1165						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1166						    chk->rec.data.stream_seq,
1167						    asoc->ssn_of_pdapi);
1168					}
1169#endif
1170					MGET(oper, M_DONTWAIT, MT_DATA);
1171					if (oper) {
1172						struct sctp_paramhdr *ph;
1173						u_int32_t *ippp;
1174						oper->m_len =
1175						    sizeof(struct sctp_paramhdr) +
1176						    sizeof(*ippp);
1177						ph = mtod(oper,
1178						    struct sctp_paramhdr *);
1179						ph->param_type =
1180						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1181						ph->param_length =
1182						    htons(oper->m_len);
1183						ippp = (u_int32_t *)(ph + 1);
1184						*ippp = htonl(0x10000004);
1185					}
1186					sctp_abort_an_association(stcb->sctp_ep,
1187					    stcb, SCTP_PEER_FAULTY, oper);
1188					*abort_flag = 1;
1189				}
1190			}
1191		}
1192		return;
1193	}
1194	/* Find its place */
1195	at = TAILQ_FIRST(&asoc->reasmqueue);
1196
1197	/* Grab the top flags */
1198	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1199		if (compare_with_wrap(at->rec.data.TSN_seq,
1200		    chk->rec.data.TSN_seq, MAX_TSN)) {
1201			/*
1202			 * one in queue is bigger than the new one, insert
1203			 * before this one
1204			 */
1205			/* A check */
1206			asoc->size_on_reasm_queue += chk->send_size;
1207			asoc->cnt_on_reasm_queue++;
1208			next = at;
1209			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1210			break;
1211		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1212			/* Gak, He sent me a duplicate str seq number */
1213			/*
1214			 * foo bar, I guess I will just free this new guy,
1215			 * should we abort too? FIX ME MAYBE? Or it COULD be
1216			 * that the SSN's have wrapped. Maybe I should compare
1217			 * to TSN somehow... sigh for now just blow away the
1218			 * chunk!
1219			 */
1220			if (chk->data)
1221				sctp_m_freem(chk->data);
1222			chk->data = NULL;
1223			sctp_free_remote_addr(chk->whoTo);
1224			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
1225			sctppcbinfo.ipi_count_chunk--;
1226			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
1227				panic("Chunk count is negative");
1228			}
1229			sctppcbinfo.ipi_gencnt_chunk++;
1230			return;
1231		} else {
1232			prev = at;
1233			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1234				/*
1235				 * We are at the end, insert it after this one
1236				 */
1237				/* check it first */
1238				asoc->size_on_reasm_queue += chk->send_size;
1239				asoc->cnt_on_reasm_queue++;
1240				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1241				break;
1242			}
1243		}
1244	}
1245	/* Now the audits */
1246	if (prev) {
1247		prev_tsn = chk->rec.data.TSN_seq - 1;
1248		if (prev_tsn == prev->rec.data.TSN_seq) {
1249			/*
1250			 * Ok the one I am dropping onto the end
1251			 * is the NEXT. A bit of valdiation here.
1252			 */
1253			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1254			    SCTP_DATA_FIRST_FRAG ||
1255			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1256			    SCTP_DATA_MIDDLE_FRAG) {
1257				/*
1258				 * Insert chk MUST be a MIDDLE or LAST fragment
1259				 */
1260				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1261				    SCTP_DATA_FIRST_FRAG) {
1262#ifdef SCTP_DEBUG
1263					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1264						printf("Prev check - It can be a midlle or last but not a first\n");
1265						printf("Gak, Evil plot, it's a FIRST!\n");
1266					}
1267#endif
1268					MGET(oper, M_DONTWAIT, MT_DATA);
1269					if (oper) {
1270						struct sctp_paramhdr *ph;
1271						u_int32_t *ippp;
1272
1273						oper->m_len =
1274						    sizeof(struct sctp_paramhdr) +
1275						    sizeof(*ippp);
1276						ph = mtod(oper,
1277						    struct sctp_paramhdr *);
1278						ph->param_type =
1279						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1280						ph->param_length =
1281						    htons(oper->m_len);
1282
1283						ippp = (u_int32_t *)(ph + 1);
1284						*ippp = htonl(0x10000005);
1285					}
1286					sctp_abort_an_association(stcb->sctp_ep,
1287					    stcb, SCTP_PEER_FAULTY, oper);
1288					*abort_flag = 1;
1289					return;
1290				}
1291				if (chk->rec.data.stream_number !=
1292				    prev->rec.data.stream_number) {
1293					/*
1294					 * Huh, need the correct STR here, they
1295					 * must be the same.
1296					 */
1297#ifdef SCTP_DEBUG
1298					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1299						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1300						    chk->rec.data.stream_number,
1301						    prev->rec.data.stream_number);
1302					}
1303#endif
1304					MGET(oper, M_DONTWAIT, MT_DATA);
1305					if (oper) {
1306						struct sctp_paramhdr *ph;
1307						u_int32_t *ippp;
1308
1309						oper->m_len =
1310						    sizeof(struct sctp_paramhdr) +
1311						    sizeof(*ippp);
1312						ph = mtod(oper,
1313						    struct sctp_paramhdr *);
1314						ph->param_type =
1315						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1316						ph->param_length =
1317						    htons(oper->m_len);
1318						ippp = (u_int32_t *)(ph + 1);
1319						*ippp = htonl(0x10000006);
1320					}
1321
1322					sctp_abort_an_association(stcb->sctp_ep,
1323					    stcb, SCTP_PEER_FAULTY, oper);
1324
1325					*abort_flag = 1;
1326					return;
1327				}
1328				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1329				    chk->rec.data.stream_seq !=
1330				    prev->rec.data.stream_seq) {
1331					/*
1332					 * Huh, need the correct STR here, they
1333					 * must be the same.
1334					 */
1335#ifdef SCTP_DEBUG
1336					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1337						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1338						    chk->rec.data.stream_seq,
1339						    prev->rec.data.stream_seq);
1340					}
1341#endif
1342					MGET(oper, M_DONTWAIT, MT_DATA);
1343					if (oper) {
1344						struct sctp_paramhdr *ph;
1345						u_int32_t *ippp;
1346
1347						oper->m_len =
1348						    sizeof(struct sctp_paramhdr) +
1349						    sizeof(*ippp);
1350						ph = mtod(oper,
1351						    struct sctp_paramhdr *);
1352						ph->param_type =
1353						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1354						ph->param_length =
1355						    htons(oper->m_len);
1356						ippp = (u_int32_t *)(ph + 1);
1357						*ippp = htonl(0x10000007);
1358					}
1359
1360					sctp_abort_an_association(stcb->sctp_ep,
1361					    stcb, SCTP_PEER_FAULTY, oper);
1362
1363					*abort_flag = 1;
1364					return;
1365				}
1366			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1367			    SCTP_DATA_LAST_FRAG) {
1368				/* Insert chk MUST be a FIRST */
1369				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1370				    SCTP_DATA_FIRST_FRAG) {
1371#ifdef SCTP_DEBUG
1372					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1373						printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1374					}
1375#endif
1376					MGET(oper, M_DONTWAIT, MT_DATA);
1377					if (oper) {
1378						struct sctp_paramhdr *ph;
1379						u_int32_t *ippp;
1380
1381						oper->m_len =
1382						    sizeof(struct sctp_paramhdr) +
1383						    sizeof(*ippp);
1384						ph = mtod(oper,
1385						    struct sctp_paramhdr *);
1386						ph->param_type =
1387						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1388						ph->param_length =
1389						    htons(oper->m_len);
1390						ippp = (u_int32_t *)(ph + 1);
1391						*ippp = htonl(0x10000008);
1392					}
1393
1394					sctp_abort_an_association(stcb->sctp_ep,
1395					    stcb, SCTP_PEER_FAULTY, oper);
1396
1397					*abort_flag = 1;
1398					return;
1399				}
1400			}
1401		}
1402	}
1403
1404	if (next) {
1405		post_tsn = chk->rec.data.TSN_seq + 1;
1406		if (post_tsn == next->rec.data.TSN_seq) {
1407			/*
1408			 * Ok the one I am inserting ahead of
1409			 * is my NEXT one. A bit of valdiation here.
1410			 */
1411			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1412				/* Insert chk MUST be a last fragment */
1413				if ((chk->rec.data.rcv_flags&SCTP_DATA_FRAG_MASK)
1414				   != SCTP_DATA_LAST_FRAG) {
1415#ifdef SCTP_DEBUG
1416					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1417						printf("Next chk - Next is FIRST, we must be LAST\n");
1418						printf("Gak, Evil plot, its not a last!\n");
1419					}
1420#endif
1421					MGET(oper, M_DONTWAIT, MT_DATA);
1422					if (oper) {
1423						struct sctp_paramhdr *ph;
1424						u_int32_t *ippp;
1425
1426						oper->m_len =
1427						    sizeof(struct sctp_paramhdr) +
1428						    sizeof(*ippp);
1429						ph = mtod(oper,
1430						    struct sctp_paramhdr *);
1431						ph->param_type =
1432						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1433						ph->param_length =
1434						    htons(oper->m_len);
1435						ippp = (u_int32_t *)(ph + 1);
1436						*ippp = htonl(0x10000009);
1437					}
1438
1439					sctp_abort_an_association(stcb->sctp_ep,
1440					    stcb, SCTP_PEER_FAULTY, oper);
1441
1442					*abort_flag = 1;
1443					return;
1444				}
1445			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1446			    SCTP_DATA_MIDDLE_FRAG ||
1447			    (next->rec.data.rcv_flags&SCTP_DATA_FRAG_MASK) ==
1448			    SCTP_DATA_LAST_FRAG) {
1449				/* Insert chk CAN be MIDDLE or FIRST NOT LAST */
1450				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1451				    SCTP_DATA_LAST_FRAG) {
1452#ifdef SCTP_DEBUG
1453					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1454						printf("Next chk - Next is a MIDDLE/LAST\n");
1455						printf("Gak, Evil plot, new prev chunk is a LAST\n");
1456					}
1457#endif
1458					MGET(oper, M_DONTWAIT, MT_DATA);
1459					if (oper) {
1460						struct sctp_paramhdr *ph;
1461						u_int32_t *ippp;
1462
1463						oper->m_len =
1464						    sizeof(struct sctp_paramhdr) +
1465						    sizeof(*ippp);
1466						ph = mtod(oper,
1467						    struct sctp_paramhdr *);
1468						ph->param_type =
1469						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1470						ph->param_length =
1471						    htons(oper->m_len);
1472						ippp = (u_int32_t *)(ph + 1);
1473						*ippp = htonl(0x1000000a);
1474					}
1475					sctp_abort_an_association(stcb->sctp_ep,
1476					    stcb, SCTP_PEER_FAULTY, oper);
1477
1478					*abort_flag = 1;
1479					return;
1480				}
1481				if (chk->rec.data.stream_number !=
1482				    next->rec.data.stream_number) {
1483					/*
1484					 * Huh, need the correct STR here, they
1485					 * must be the same.
1486					 */
1487#ifdef SCTP_DEBUG
1488					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1489						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1490						    chk->rec.data.stream_number,
1491						    next->rec.data.stream_number);
1492					}
1493#endif
1494					MGET(oper, M_DONTWAIT, MT_DATA);
1495					if (oper) {
1496						struct sctp_paramhdr *ph;
1497						u_int32_t *ippp;
1498
1499						oper->m_len =
1500						    sizeof(struct sctp_paramhdr) +
1501						    sizeof(*ippp);
1502						ph = mtod(oper,
1503						    struct sctp_paramhdr *);
1504						ph->param_type =
1505						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1506						ph->param_length =
1507						    htons(oper->m_len);
1508						ippp = (u_int32_t *)(ph + 1);
1509						*ippp = htonl(0x1000000b);
1510					}
1511
1512					sctp_abort_an_association(stcb->sctp_ep,
1513					    stcb, SCTP_PEER_FAULTY, oper);
1514
1515					*abort_flag = 1;
1516					return;
1517				}
1518				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1519				    chk->rec.data.stream_seq !=
1520				    next->rec.data.stream_seq) {
1521					/*
1522					 * Huh, need the correct STR here, they
1523					 * must be the same.
1524					 */
1525#ifdef SCTP_DEBUG
1526					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1527						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1528						    chk->rec.data.stream_seq,
1529						    next->rec.data.stream_seq);
1530					}
1531#endif
1532					MGET(oper, M_DONTWAIT, MT_DATA);
1533					if (oper) {
1534						struct sctp_paramhdr *ph;
1535						u_int32_t *ippp;
1536
1537						oper->m_len =
1538						    sizeof(struct sctp_paramhdr) +
1539						    sizeof(*ippp);
1540						ph = mtod(oper,
1541						    struct sctp_paramhdr *);
1542						ph->param_type =
1543						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1544						ph->param_length =
1545						    htons(oper->m_len);
1546						ippp = (u_int32_t *)(ph + 1);
1547						*ippp = htonl(0x1000000c);
1548					}
1549
1550					sctp_abort_an_association(stcb->sctp_ep,
1551					    stcb, SCTP_PEER_FAULTY, oper);
1552
1553					*abort_flag = 1;
1554					return;
1555
1556				}
1557			}
1558		}
1559	}
1560	/*
1561	 * now that we have all in there place we must check a number of
1562	 * things to see if we can send data to the ULP.
1563	 */
1564	/* we need to do some delivery, if we can */
1565	chk = TAILQ_FIRST(&asoc->reasmqueue);
1566	if (chk == NULL) {
1567		/* Huh? */
1568		asoc->size_on_reasm_queue = 0;
1569		asoc->cnt_on_reasm_queue = 0;
1570		return;
1571	}
1572	if (asoc->fragmented_delivery_inprogress == 0) {
1573		nxt_todel =
1574		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
1575		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
1576		    (nxt_todel == chk->rec.data.stream_seq ||
1577		     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
1578			/*
1579			 * Yep the first one is here and its
1580			 * ok to deliver but should we?
1581			 */
1582			if (TAILQ_EMPTY(&asoc->delivery_queue) &&
1583			    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
1584			     (asoc->size_on_reasm_queue >=
1585			      (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) &&
1586			      tsize))) {
1587				/*
1588				 * Yes, we setup to
1589				 * start reception, by backing down the TSN
1590				 * just in case we can't deliver. If we
1591				 */
1592				asoc->fragmented_delivery_inprogress = 1;
1593				asoc->tsn_last_delivered =
1594				    chk->rec.data.TSN_seq - 1;
1595				asoc->str_of_pdapi =
1596				    chk->rec.data.stream_number;
1597				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
1598				asoc->fragment_flags = chk->rec.data.rcv_flags;
1599				sctp_service_reassembly(stcb, asoc, 0);
1600			}
1601		}
1602	} else {
1603		sctp_service_reassembly(stcb, asoc, 0);
1604	}
1605}
1606
1607/*
1608 * This is an unfortunate routine. It checks to make sure a evil guy is not
1609 * stuffing us full of bad packet fragments. A broken peer could also do this
1610 * but this is doubtful. It is to bad I must worry about evil crackers sigh
1611 * :< more cycles.
1612 */
1613static int
1614sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
1615    struct sctp_tmit_chunk *chk)
1616{
1617	struct sctp_tmit_chunk *at;
1618	u_int32_t tsn_est;
1619
1620	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1621		if (compare_with_wrap(chk->rec.data.TSN_seq,
1622		    at->rec.data.TSN_seq, MAX_TSN)) {
1623			/* is it one bigger? */
1624			tsn_est = at->rec.data.TSN_seq + 1;
1625			if (tsn_est == chk->rec.data.TSN_seq) {
1626				/* yep. It better be a last then*/
1627				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1628				    SCTP_DATA_LAST_FRAG) {
1629					/*
1630					 * Ok this guy belongs next to a guy
1631					 * that is NOT last, it should be a
1632					 * middle/last, not a complete chunk.
1633					 */
1634					return (1);
1635				} else {
1636					/*
1637					 * This guy is ok since its a LAST and
1638					 * the new chunk is a fully self-
1639					 * contained one.
1640					 */
1641					return (0);
1642				}
1643			}
1644		} else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
1645			/* Software error since I have a dup? */
1646			return (1);
1647		} else {
1648			/*
1649			 * Ok, 'at' is larger than new chunk but does it
1650			 * need to be right before it.
1651			 */
1652			tsn_est = chk->rec.data.TSN_seq + 1;
1653			if (tsn_est == at->rec.data.TSN_seq) {
1654				/* Yep, It better be a first */
1655				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1656				    SCTP_DATA_FIRST_FRAG) {
1657					return (1);
1658				} else {
1659					return (0);
1660				}
1661			}
1662		}
1663	}
1664	return (0);
1665}
1666
1667extern unsigned int sctp_max_chunks_on_queue;
1668static int
1669sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1670    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1671    struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
1672    int *break_flag, int last_chunk)
1673{
1674	/* Process a data chunk */
1675	/*  struct sctp_tmit_chunk *chk;*/
1676	struct sctp_tmit_chunk *chk;
1677	u_int32_t tsn, gap;
1678	struct mbuf *dmbuf;
1679	int the_len;
1680	u_int16_t strmno, strmseq;
1681	struct mbuf *oper;
1682
1683	chk = NULL;
1684	tsn = ntohl(ch->dp.tsn);
1685#ifdef SCTP_MAP_LOGGING
1686	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
1687#endif
1688	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1689	    asoc->cumulative_tsn == tsn) {
1690		/* It is a duplicate */
1691		sctp_pegs[SCTP_DUPTSN_RECVD]++;
1692		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1693			/* Record a dup for the next outbound sack */
1694			asoc->dup_tsns[asoc->numduptsns] = tsn;
1695			asoc->numduptsns++;
1696		}
1697		return (0);
1698	}
1699	/* Calculate the number of TSN's between the base and this TSN */
1700	if (tsn >= asoc->mapping_array_base_tsn) {
1701		gap  = tsn - asoc->mapping_array_base_tsn;
1702	} else {
1703		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1704	}
1705	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1706		/* Can't hold the bit in the mapping at max array, toss it */
1707		return (0);
1708	}
1709	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1710		if (sctp_expand_mapping_array(asoc)) {
1711			/* Can't expand, drop it */
1712			return (0);
1713		}
1714	}
1715	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1716		*high_tsn = tsn;
1717	}
1718	/* See if we have received this one already */
1719	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1720		sctp_pegs[SCTP_DUPTSN_RECVD]++;
1721		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1722			/* Record a dup for the next outbound sack */
1723			asoc->dup_tsns[asoc->numduptsns] = tsn;
1724			asoc->numduptsns++;
1725		}
1726		if (!callout_pending(&asoc->dack_timer.timer)) {
1727			/*
1728			 * By starting the timer we assure that we
1729			 * WILL sack at the end of the packet
1730			 * when sctp_sack_check gets called.
1731			 */
1732			sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
1733			    stcb, NULL);
1734		}
1735		return (0);
1736	}
1737	/*
1738	 * Check to see about the GONE flag, duplicates would cause
1739	 * a sack to be sent up above
1740	 */
1741	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1742		/*
1743		 * wait a minute, this guy is gone, there is no
1744		 * longer a receiver. Send peer an ABORT!
1745		 */
1746		struct mbuf *op_err;
1747		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1748		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
1749		*abort_flag = 1;
1750		return (0);
1751	}
1752	/*
1753	 * Now before going further we see if there is room. If NOT then
1754	 * we MAY let one through only IF this TSN is the one we are
1755	 * waiting for on a partial delivery API.
1756	 */
1757
1758	/* now do the tests */
1759	if (((asoc->cnt_on_all_streams +
1760	 asoc->cnt_on_delivery_queue +
1761	 asoc->cnt_on_reasm_queue +
1762	  asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
1763	   (((int)asoc->my_rwnd) <= 0)) {
1764		/*
1765		 * When we have NO room in the rwnd we check
1766		 * to make sure the reader is doing its job...
1767		 */
1768		if (stcb->sctp_socket->so_rcv.sb_cc) {
1769			/* some to read, wake-up */
1770			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1771		}
1772		/* now is it in the mapping array of what we have accepted? */
1773		if (compare_with_wrap(tsn,
1774		    asoc->highest_tsn_inside_map, MAX_TSN)) {
1775
1776			/* Nope not in the valid range dump it */
1777#ifdef SCTP_DEBUG
1778			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1779				printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
1780				    (u_long)tsn, (u_long)asoc->my_rwnd,
1781				    sctp_sbspace(&stcb->sctp_socket->so_rcv),
1782				    stcb->asoc.cnt_on_delivery_queue);
1783			}
1784#endif
1785			sctp_set_rwnd(stcb, asoc);
1786			if ((asoc->cnt_on_all_streams +
1787			    asoc->cnt_on_delivery_queue +
1788			    asoc->cnt_on_reasm_queue +
1789			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
1790				sctp_pegs[SCTP_MSGC_DROP]++;
1791			} else {
1792				sctp_pegs[SCTP_RWND_DROPS]++;
1793			}
1794			*break_flag = 1;
1795			return (0);
1796		}
1797	}
1798	strmno = ntohs(ch->dp.stream_id);
1799	if (strmno >= asoc->streamincnt) {
1800		struct sctp_paramhdr *phdr;
1801		struct mbuf *mb;
1802
1803		MGETHDR(mb, M_DONTWAIT, MT_DATA);
1804		if (mb != NULL) {
1805			/* add some space up front so prepend will work well */
1806			mb->m_data += sizeof(struct sctp_chunkhdr);
1807			phdr = mtod(mb, struct sctp_paramhdr *);
1808			/*
1809			 * Error causes are just param's and this one has
1810			 * two back to back phdr, one with the error type
1811			 * and size, the other with the streamid and a rsvd
1812		 	 */
1813			mb->m_pkthdr.len = mb->m_len =
1814			    (sizeof(struct sctp_paramhdr) * 2);
1815			phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
1816			phdr->param_length =
1817			    htons(sizeof(struct sctp_paramhdr) * 2);
1818			phdr++;
1819			/* We insert the stream in the type field */
1820			phdr->param_type = ch->dp.stream_id;
1821			/* And set the length to 0 for the rsvd field */
1822			phdr->param_length = 0;
1823			sctp_queue_op_err(stcb, mb);
1824		}
1825		sctp_pegs[SCTP_BAD_STRMNO]++;
1826		return (0);
1827	}
1828	/*
1829	 * Before we continue lets validate that we are not
1830	 * being fooled by an evil attacker. We can only
1831	 * have 4k chunks based on our TSN spread allowed
1832	 * by the mapping array 512 * 8 bits, so there is
1833	 * no way our stream sequence numbers could have wrapped.
1834	 * We of course only validate the FIRST fragment so the
1835	 * bit must be set.
1836	 */
1837	strmseq = ntohs(ch->dp.stream_sequence);
1838	if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1839	    (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1840	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1841	     strmseq, MAX_SEQ) ||
1842	     asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1843		/* The incoming sseq is behind where we last delivered? */
1844#ifdef SCTP_DEBUG
1845		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1846			printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1847			    strmseq,
1848			    asoc->strmin[strmno].last_sequence_delivered);
1849		}
1850#endif
1851		/*
1852		 * throw it in the stream so it gets cleaned up in
1853		 * association destruction
1854		 */
1855		MGET(oper, M_DONTWAIT, MT_DATA);
1856		if (oper) {
1857			struct sctp_paramhdr *ph;
1858			u_int32_t *ippp;
1859
1860			oper->m_len = sizeof(struct sctp_paramhdr) +
1861			    sizeof(*ippp);
1862			ph = mtod(oper, struct sctp_paramhdr *);
1863			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1864			ph->param_length = htons(oper->m_len);
1865			ippp = (u_int32_t *)(ph + 1);
1866			*ippp = htonl(0x20000001);
1867		}
1868		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
1869		    oper);
1870		sctp_pegs[SCTP_BAD_SSN_WRAP]++;
1871		*abort_flag = 1;
1872		return (0);
1873	}
1874
1875	the_len = (chk_length-sizeof(struct sctp_data_chunk));
1876	if (last_chunk == 0) {
1877		dmbuf = sctp_m_copym(*m,
1878		    (offset + sizeof(struct sctp_data_chunk)),
1879		    the_len, M_DONTWAIT);
1880	} else {
1881		/* We can steal the last chunk */
1882		dmbuf = *m;
1883		/* lop off the top part */
1884		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1885		if (dmbuf->m_pkthdr.len > the_len) {
1886			/* Trim the end round bytes off  too */
1887			m_adj(dmbuf, -(dmbuf->m_pkthdr.len-the_len));
1888		}
1889		sctp_pegs[SCTP_NO_COPY_IN]++;
1890	}
1891	if (dmbuf == NULL) {
1892		sctp_pegs[SCTP_DROP_NOMEMORY]++;
1893		return (0);
1894	}
1895	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1896	    asoc->fragmented_delivery_inprogress == 0 &&
1897	    TAILQ_EMPTY(&asoc->delivery_queue) &&
1898	    ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1899	     ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1900	      TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
1901	    ((long)(stcb->sctp_socket->so_rcv.sb_hiwat -
1902	            stcb->sctp_socket->so_rcv.sb_cc) >= (long)the_len)) {
1903		/* Candidate for express delivery */
1904		/*
1905		 * Its not fragmented,
1906		 * No PD-API is up,
1907		 * Nothing in the delivery queue,
1908		 * Its un-ordered OR ordered and the next to deliver AND
1909		 * nothing else is stuck on the stream queue,
1910		 * And there is room for it in the socket buffer.
1911		 * Lets just stuff it up the buffer....
1912		 */
1913
1914		struct mbuf *control, *mmm;
1915		struct sockaddr_in6 sin6;
1916		struct sockaddr_in6 lsa6;
1917		const struct sockaddr *to;
1918
1919		/* It would be nice to avoid this copy if we could :< */
1920		control = sctp_build_ctl_nchunk(stcb, tsn,
1921		    ch->dp.protocol_id, 0, strmno, strmseq,
1922		    ch->ch.chunk_flags);
1923		/* XXX need to append PKTHDR to the socket buffer first */
1924
1925		if ((dmbuf->m_flags & M_PKTHDR) == 0) {
1926			struct mbuf *tmp;
1927			MGETHDR(tmp, M_DONTWAIT, MT_DATA);
1928			if (tmp == NULL) {
1929
1930				/* no room! */
1931				if (control) {
1932					sctp_m_freem(control);
1933					stcb->asoc.my_rwnd_control_len -=
1934					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1935				}
1936
1937				goto failed_express_del;
1938			}
1939			tmp->m_pkthdr.len = the_len;
1940			tmp->m_len = 0;
1941			tmp->m_next = dmbuf;
1942			dmbuf = tmp;
1943		}
1944		to = rtcache_getdst(&net->ro);
1945		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1946		    to->sa_family == AF_INET) {
1947			const struct sockaddr_in *sin;
1948
1949			sin = satocsin(to);
1950			in6_sin_2_v4mapsin6(sin, &sin6);
1951			to = (struct sockaddr *)&sin6;
1952		}
1953
1954		/* check and strip embedded scope junk */
1955		to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
1956		    &lsa6);
1957		if (((const struct sockaddr_in *)to)->sin_port == 0) {
1958			printf("Huh c, port is %d not net:%p %d?\n",
1959			       ((const struct sockaddr_in *)to)->sin_port,
1960			       net,
1961			       (int)(ntohs(stcb->rport)));
1962			/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
1963			/* XXX */
1964		}
1965
1966		mmm = dmbuf;
1967		/* Mark the EOR */
1968		while (mmm->m_next != NULL) {
1969			mmm = mmm->m_next;
1970		}
1971		mmm->m_flags |= M_EOR;
1972		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1973			/* we have a new high score */
1974			asoc->highest_tsn_inside_map = tsn;
1975#ifdef SCTP_MAP_LOGGING
1976			sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1977#endif
1978		}
1979		SCTP_TCB_UNLOCK(stcb);
1980		SCTP_INP_WLOCK(stcb->sctp_ep);
1981		SCTP_TCB_LOCK(stcb);
1982		if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
1983		    control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
1984			if (control) {
1985				sctp_m_freem(control);
1986				stcb->asoc.my_rwnd_control_len -=
1987				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1988			}
1989			sctp_m_freem(dmbuf);
1990			goto failed_express_del;
1991		}
1992		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
1993			if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
1994				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
1995			}
1996		} else {
1997			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
1998		}
1999		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2000		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2001		if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2002
2003			/* for ordered, bump what we delivered */
2004			asoc->strmin[strmno].last_sequence_delivered++;
2005		}
2006		sctp_pegs[SCTP_EXPRESS_ROUTE]++;
2007#ifdef SCTP_STR_LOGGING
2008		sctp_log_strm_del_alt(tsn, strmseq,
2009		    SCTP_STR_LOG_FROM_EXPRS_DEL);
2010#endif
2011#ifdef SCTP_DEBUG
2012		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2013			printf("Express Delivery succeeds\n");
2014		}
2015#endif
2016		goto finish_express_del;
2017	}
2018
2019 failed_express_del:
2020	/* If we reach here this is a new chunk */
2021	chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
2022	if (chk == NULL) {
2023		/* No memory so we drop the chunk */
2024		sctp_pegs[SCTP_DROP_NOMEMORY]++;
2025		if (last_chunk == 0) {
2026			/* we copied it, free the copy */
2027			sctp_m_freem(dmbuf);
2028		}
2029		return (0);
2030	}
2031	sctppcbinfo.ipi_count_chunk++;
2032	sctppcbinfo.ipi_gencnt_chunk++;
2033	chk->rec.data.TSN_seq = tsn;
2034	chk->rec.data.stream_seq = strmseq;
2035	chk->rec.data.stream_number = strmno;
2036	chk->rec.data.payloadtype = ch->dp.protocol_id;
2037	chk->rec.data.context = 0;
2038	chk->rec.data.doing_fast_retransmit = 0;
2039	chk->rec.data.rcv_flags = ch->ch.chunk_flags;
2040	chk->asoc = asoc;
2041	chk->send_size = the_len;
2042	chk->whoTo = net;
2043	net->ref_count++;
2044	chk->data = dmbuf;
2045
2046
2047	/* Mark it as received */
2048	/* Now queue it where it belongs */
2049	if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
2050	    SCTP_DATA_NOT_FRAG) {
2051		/* First a sanity check */
2052		if (asoc->fragmented_delivery_inprogress) {
2053			/*
2054			 * Ok, we have a fragmented delivery in progress
2055			 * if this chunk is next to deliver OR belongs in
2056			 * our view to the reassembly, the peer is evil
2057			 * or broken.
2058			 */
2059			u_int32_t estimate_tsn;
2060			estimate_tsn = asoc->tsn_last_delivered + 1;
2061			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2062			    (estimate_tsn == chk->rec.data.TSN_seq)) {
2063				/* Evil/Broke peer */
2064				MGET(oper, M_DONTWAIT, MT_DATA);
2065				if (oper) {
2066					struct sctp_paramhdr *ph;
2067					u_int32_t *ippp;
2068
2069					oper->m_len =
2070					    sizeof(struct sctp_paramhdr) +
2071					    sizeof(*ippp);
2072					ph = mtod(oper, struct sctp_paramhdr *);
2073					ph->param_type =
2074					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2075					ph->param_length = htons(oper->m_len);
2076					ippp = (u_int32_t *)(ph + 1);
2077					*ippp = htonl(0x20000002);
2078				}
2079				sctp_abort_an_association(stcb->sctp_ep, stcb,
2080				    SCTP_PEER_FAULTY, oper);
2081
2082				*abort_flag = 1;
2083				sctp_pegs[SCTP_DROP_FRAG]++;
2084				return (0);
2085			} else {
2086				if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2087					MGET(oper, M_DONTWAIT, MT_DATA);
2088					if (oper) {
2089						struct sctp_paramhdr *ph;
2090						u_int32_t *ippp;
2091
2092						oper->m_len =
2093						    sizeof(struct sctp_paramhdr) +
2094						    sizeof(*ippp);
2095						ph = mtod(oper,
2096						    struct sctp_paramhdr *);
2097						ph->param_type =
2098						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2099						ph->param_length =
2100						    htons(oper->m_len);
2101						ippp = (u_int32_t *)(ph + 1);
2102						*ippp = htonl(0x20000003);
2103					}
2104					sctp_abort_an_association(stcb->sctp_ep,
2105					    stcb, SCTP_PEER_FAULTY, oper);
2106
2107					*abort_flag = 1;
2108					sctp_pegs[SCTP_DROP_FRAG]++;
2109					return (0);
2110				}
2111			}
2112		} else {
2113			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2114				/*
2115				 * Reassembly queue is NOT empty
2116				 * validate that this chk does not need to
2117				 * be in reasembly queue. If it does then
2118				 * our peer is broken or evil.
2119				 */
2120				if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2121					MGET(oper, M_DONTWAIT, MT_DATA);
2122					if (oper) {
2123						struct sctp_paramhdr *ph;
2124						u_int32_t *ippp;
2125
2126						oper->m_len =
2127						    sizeof(struct sctp_paramhdr) +
2128						    sizeof(*ippp);
2129						ph = mtod(oper,
2130						    struct sctp_paramhdr *);
2131						ph->param_type =
2132						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2133						ph->param_length =
2134						    htons(oper->m_len);
2135						ippp = (u_int32_t *)(ph + 1);
2136						*ippp = htonl(0x20000004);
2137					}
2138					sctp_abort_an_association(stcb->sctp_ep,
2139					    stcb, SCTP_PEER_FAULTY, oper);
2140
2141					*abort_flag = 1;
2142					sctp_pegs[SCTP_DROP_FRAG]++;
2143					return (0);
2144				}
2145			}
2146		}
2147		if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
2148			/* queue directly into socket buffer */
2149			sctp_deliver_data(stcb, asoc, chk, 0);
2150			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2151		} else {
2152			/* Special check for when streams are resetting.
2153			 * We could be more smart about this and check the
2154			 * actual stream to see if it is not being reset.. that
2155			 * way we would not create a HOLB when amongst streams
2156			 * being reset and those not being reset.
2157			 *
2158			 * We take complete messages that have a stream reset
2159			 * intervening (aka the TSN is after where our cum-ack needs
2160			 * to be) off and put them on a pending_reply_queue. The
2161			 * reassembly ones we do not have to worry about since
2162			 * they are all sorted and processed by TSN order. It
2163			 * is only the singletons I must worry about.
2164			 */
2165			if ((asoc->pending_reply) &&
2166			   ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2167			    (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))
2168				) {
2169				/* yep its past where we need to reset... go ahead and
2170				 * queue it.
2171				 */
2172				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue , chk, sctp_next);
2173			}  else {
2174				sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2175			}
2176		}
2177	} else {
2178		/* Into the re-assembly queue */
2179		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2180		if (*abort_flag) {
2181			sctp_pegs[SCTP_DROP_FRAG]++;
2182			return (0);
2183		}
2184	}
2185	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2186		/* we have a new high score */
2187		asoc->highest_tsn_inside_map = tsn;
2188#ifdef SCTP_MAP_LOGGING
2189		sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2190#endif
2191	}
2192 finish_express_del:
2193	if (last_chunk) {
2194		*m = NULL;
2195	}
2196	sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
2197	/* Set it present please */
2198#ifdef SCTP_STR_LOGGING
2199	sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
2200#endif
2201#ifdef SCTP_MAP_LOGGING
2202	sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2203		     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2204#endif
2205	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2206	return (1);
2207}
2208
2209void
2210sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2211{
2212	/*
2213	 * Now we also need to check the mapping array in a couple of ways.
2214	 * 1) Did we move the cum-ack point?
2215	 */
2216	struct sctp_association *asoc;
2217	int i, at;
2218	int m_size, all_ones;
2219	int slide_from, slide_end, lgap, distance;
2220#ifdef SCTP_MAP_LOGGING
2221	uint32_t old_cumack, old_base, old_highest;
2222	unsigned char aux_array[64];
2223#endif
2224
2225	asoc = &stcb->asoc;
2226	at = 0;
2227
2228#ifdef SCTP_MAP_LOGGING
2229	old_cumack = asoc->cumulative_tsn;
2230	old_base = asoc->mapping_array_base_tsn;
2231	old_highest = asoc->highest_tsn_inside_map;
2232	if (asoc->mapping_array_size < 64)
2233		memcpy(aux_array, asoc->mapping_array,
2234		    asoc->mapping_array_size);
2235	else
2236		memcpy(aux_array, asoc->mapping_array, 64);
2237#endif
2238
2239	/*
2240	 * We could probably improve this a small bit by calculating the
2241	 * offset of the current cum-ack as the starting point.
2242	 */
2243	all_ones = 1;
2244	m_size = stcb->asoc.mapping_array_size << 3;
2245	for (i = 0; i < m_size; i++) {
2246		if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2247			/*
2248			 * Ok we found the first place that we are
2249			 * missing a TSN.
2250			 */
2251			at = i;
2252			all_ones = 0;
2253			asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2254			    (i - 1);
2255			break;
2256		}
2257	}
2258	if (compare_with_wrap(asoc->cumulative_tsn,
2259			      asoc->highest_tsn_inside_map,
2260			      MAX_TSN)) {
2261		panic("huh, cumack greater than high-tsn in map");
2262	}
2263	if (all_ones ||
2264	    (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2265		/* The complete array was completed by a single FR */
2266		/* highest becomes the cum-ack */
2267		int clr;
2268		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2269		/* clear the array */
2270		if (all_ones)
2271			clr = asoc->mapping_array_size;
2272		else {
2273			clr = (at >> 3) + 1;
2274			/*
2275			 * this should be the allones case
2276			 * but just in case :>
2277			 */
2278			if (clr > asoc->mapping_array_size)
2279				clr = asoc->mapping_array_size;
2280		}
2281		memset(asoc->mapping_array, 0, clr);
2282		/* base becomes one ahead of the cum-ack */
2283		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2284#ifdef SCTP_MAP_LOGGING
2285		sctp_log_map(old_base, old_cumack, old_highest,
2286		    SCTP_MAP_PREPARE_SLIDE);
2287		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2288		    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2289#endif
2290	} else if (at >= 8) {
2291		/* we can slide the mapping array down */
2292		/* Calculate the new byte position we can move down */
2293		slide_from = at >> 3;
2294		/* now calculate the ceiling of the move using our highest TSN value */
2295		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2296			lgap = asoc->highest_tsn_inside_map -
2297			    asoc->mapping_array_base_tsn;
2298		} else {
2299			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2300			    asoc->highest_tsn_inside_map + 1;
2301		}
2302		slide_end = lgap >> 3;
2303		if (slide_end < slide_from) {
2304			panic("impossible slide");
2305		}
2306		distance = (slide_end-slide_from) + 1;
2307#ifdef SCTP_MAP_LOGGING
2308		sctp_log_map(old_base, old_cumack, old_highest,
2309		    SCTP_MAP_PREPARE_SLIDE);
2310		sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2311		    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2312#endif
2313		if (distance + slide_from > asoc->mapping_array_size ||
2314		    distance < 0) {
2315#ifdef SCTP_DEBUG
2316			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2317				printf("Ugh bad addition.. you can't hrumpp!\n");
2318			}
2319#endif
2320			/*
2321			 * Here we do NOT slide forward the array so that
2322			 * hopefully when more data comes in to fill it up
2323			 * we will be able to slide it forward. Really
2324			 * I don't think this should happen :-0
2325			 */
2326
2327#ifdef SCTP_MAP_LOGGING
2328			sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2329			    (uint32_t)asoc->mapping_array_size,
2330			    SCTP_MAP_SLIDE_NONE);
2331#endif
2332		} else {
2333			int ii;
2334			for (ii = 0; ii < distance; ii++) {
2335				asoc->mapping_array[ii] =
2336				    asoc->mapping_array[slide_from + ii];
2337			}
2338			for (ii = distance;ii <= slide_end; ii++) {
2339				asoc->mapping_array[ii] = 0;
2340			}
2341			asoc->mapping_array_base_tsn += (slide_from << 3);
2342#ifdef SCTP_MAP_LOGGING
2343			sctp_log_map(asoc->mapping_array_base_tsn,
2344			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2345			    SCTP_MAP_SLIDE_RESULT);
2346#endif
2347		}
2348	}
2349
2350        /* check the special flag for stream resets */
2351	if ((asoc->pending_reply) &&
2352	   ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2353	    ((asoc->cumulative_tsn+1) ==  ntohl(asoc->pending_reply->reset_at_tsn)))
2354		) {
2355		/* we have finished working through the backlogged TSN's now
2356		 * time to reset streams.
2357		 * 1: call reset function.
2358		 * 2: free pending_reply space
2359		 * 3: distribute any chunks in pending_reply_queue.
2360		 */
2361		struct sctp_tmit_chunk *chk;
2362		sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2363		free(asoc->pending_reply, M_PCB);
2364		asoc->pending_reply = NULL;
2365		chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2366		while (chk) {
2367			TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2368			sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2369			if (*abort_flag) {
2370				return;
2371			}
2372			chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2373		}
2374	}
2375	/*
2376	 * Now we need to see if we need to queue a sack or just start
2377	 * the timer (if allowed).
2378	 */
2379	if (ok_to_sack) {
2380		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2381			/*
2382			 * Ok special case, in SHUTDOWN-SENT case.
2383			 * here we maker sure SACK timer is off and
2384			 * instead send a SHUTDOWN and a SACK
2385			 */
2386			if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2387				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2388				    stcb->sctp_ep, stcb, NULL);
2389			}
2390#ifdef SCTP_DEBUG
2391			if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2392				printf("%s:%d sends a shutdown\n",
2393				       __FILE__,
2394				       __LINE__
2395				       );
2396			}
2397#endif
2398			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2399			sctp_send_sack(stcb);
2400		} else {
2401			int is_a_gap;
2402			/* is there a gap now ? */
2403			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2404			    stcb->asoc.cumulative_tsn, MAX_TSN);
2405			if ((stcb->asoc.first_ack_sent == 0) ||	/* First time we send a sack */
2406			    ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2407			    (stcb->asoc.numduptsns) ||		/* we have dup's */
2408			    (is_a_gap) ||			/* is still a gap */
2409			    (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up . second packet */
2410				) {
2411				/*
2412			 	 * Ok we must build a SACK since the timer
2413				 * is pending, we got our first packet OR
2414				 * there are gaps or duplicates.
2415				 */
2416				stcb->asoc.first_ack_sent = 1;
2417				sctp_send_sack(stcb);
2418				/* The sending will stop the timer */
2419			} else {
2420				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2421				    stcb->sctp_ep, stcb, NULL);
2422			}
2423		}
2424	}
2425}
2426
2427void
2428sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2429{
2430	struct sctp_tmit_chunk *chk;
2431	int tsize, cntDel;
2432	u_int16_t nxt_todel;
2433
2434	cntDel = 0;
2435	if (asoc->fragmented_delivery_inprogress) {
2436		sctp_service_reassembly(stcb, asoc, hold_locks);
2437	}
2438	/* Can we proceed further, i.e. the PD-API is complete */
2439	if (asoc->fragmented_delivery_inprogress) {
2440		/* no */
2441		return;
2442	}
2443
2444	/*
2445	 * Yes, reassembly delivery no longer in progress see if we
2446	 * have some on the sb hold queue.
2447	 */
2448	do {
2449		if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
2450			if (cntDel == 0)
2451				sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2452			break;
2453		}
2454		/* If deliver_data says no we must stop */
2455		if (sctp_deliver_data(stcb, asoc, (struct sctp_tmit_chunk *)NULL, hold_locks) == 0)
2456			break;
2457		cntDel++;
2458		chk = TAILQ_FIRST(&asoc->delivery_queue);
2459	} while (chk);
2460	if (cntDel) {
2461		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2462	}
2463	/*
2464	 * Now is there some other chunk I can deliver
2465	 * from the reassembly queue.
2466	 */
2467	chk = TAILQ_FIRST(&asoc->reasmqueue);
2468	if (chk == NULL) {
2469		asoc->size_on_reasm_queue = 0;
2470		asoc->cnt_on_reasm_queue = 0;
2471		return;
2472	}
2473	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2474	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2475	    ((nxt_todel == chk->rec.data.stream_seq) ||
2476	     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2477		/*
2478		 * Yep the first one is here. We setup to
2479		 * start reception, by backing down the TSN
2480		 * just in case we can't deliver.
2481		 */
2482
2483		/*
2484		 * Before we start though either all of the
2485		 * message should be here or 1/4 the socket buffer
2486		 * max or nothing on the delivery queue and something
2487		 * can be delivered.
2488		 */
2489		if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2490		    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2491		     (asoc->size_on_reasm_queue >=
2492		      (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) && tsize))) {
2493			asoc->fragmented_delivery_inprogress = 1;
2494			asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2495			asoc->str_of_pdapi = chk->rec.data.stream_number;
2496			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2497			asoc->fragment_flags = chk->rec.data.rcv_flags;
2498			sctp_service_reassembly(stcb, asoc, hold_locks);
2499		}
2500	}
2501}
2502
2503int
2504sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2505    struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2506    struct sctp_nets *net, u_int32_t *high_tsn)
2507{
2508	struct sctp_data_chunk *ch, chunk_buf;
2509	struct sctp_association *asoc;
2510	int num_chunks = 0;	/* number of control chunks processed */
2511	int chk_length, break_flag, last_chunk;
2512	int abort_flag = 0, was_a_gap = 0;
2513	struct mbuf *m;
2514
2515	/* set the rwnd */
2516	sctp_set_rwnd(stcb, &stcb->asoc);
2517
2518	m = *mm;
2519	asoc = &stcb->asoc;
2520	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2521	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2522		/* there was a gap before this data was processed */
2523		was_a_gap = 1;
2524	}
2525	/*
2526	 * setup where we got the last DATA packet from for
2527	 * any SACK that may need to go out. Don't bump
2528	 * the net. This is done ONLY when a chunk
2529	 * is assigned.
2530	 */
2531	asoc->last_data_chunk_from = net;
2532
2533	/*
2534	 * Now before we proceed we must figure out if this
2535	 * is a wasted cluster... i.e. it is a small packet
2536	 * sent in and yet the driver underneath allocated a
2537	 * full cluster for it. If so we must copy it to a
2538	 * smaller mbuf and free up the cluster mbuf. This
2539	 * will help with cluster starvation.
2540	 */
2541	if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2542		/* we only handle mbufs that are singletons.. not chains */
2543		MGET(m, M_DONTWAIT, MT_DATA);
2544		if (m) {
2545			/* ok lets see if we can copy the data up */
2546			vaddr_t *from, *to;
2547
2548			if ((*mm)->m_flags & M_PKTHDR) {
2549				/* got to copy the header first */
2550#ifdef __APPLE__
2551				M_COPY_PKTHDR(m, (*mm));
2552#else
2553				m_move_pkthdr(m, (*mm));
2554#endif
2555			}
2556			/* get the pointers and copy */
2557			to = mtod(m, vaddr_t *);
2558			from = mtod((*mm), vaddr_t *);
2559			memcpy(to, from, (*mm)->m_len);
2560			/* copy the length and free up the old */
2561			m->m_len = (*mm)->m_len;
2562			sctp_m_freem(*mm);
2563			/* sucess, back copy */
2564			*mm = m;
2565		} else {
2566			/* We are in trouble in the mbuf world .. yikes */
2567			m = *mm;
2568		}
2569	}
2570	/* get pointer to the first chunk header */
2571	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2572	    sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2573	if (ch == NULL) {
2574		printf(" ... its short\n");
2575		return (1);
2576	}
2577	/*
2578	 * process all DATA chunks...
2579	 */
2580
2581#ifdef SCTP_DEBUG
2582	if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2583		printf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2584		    *offset, length, iphlen, (int)ch->ch.chunk_type);
2585	}
2586#endif
2587
2588	*high_tsn = asoc->cumulative_tsn;
2589	break_flag = 0;
2590	while (ch->ch.chunk_type == SCTP_DATA) {
2591		/* validate chunk length */
2592		chk_length = ntohs(ch->ch.chunk_length);
2593		if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2594		    length - *offset < chk_length) {
2595			/*
2596			 * Need to send an abort since we had a invalid
2597			 * data chunk.
2598			 */
2599			struct mbuf *op_err;
2600			MGET(op_err, M_DONTWAIT, MT_DATA);
2601			if (op_err) {
2602				struct sctp_paramhdr *ph;
2603				u_int32_t *ippp;
2604
2605				op_err->m_len = sizeof(struct sctp_paramhdr) +
2606				    sizeof(*ippp);
2607				ph = mtod(op_err, struct sctp_paramhdr *);
2608				ph->param_type =
2609				    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2610				ph->param_length = htons(op_err->m_len);
2611				ippp = (u_int32_t *)(ph + 1);
2612				*ippp = htonl(0x30000001);
2613			}
2614			sctp_abort_association(inp, stcb, m, iphlen, sh,
2615			    op_err);
2616			return (2);
2617		}
2618#ifdef SCTP_DEBUG
2619		if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2620			printf("A chunk of len:%d to process (tot:%d)\n",
2621			    chk_length, length - *offset);
2622		}
2623#endif
2624
2625#ifdef SCTP_AUDITING_ENABLED
2626		sctp_audit_log(0xB1, 0);
2627#endif
2628		if (SCTP_SIZE32(chk_length) == *offset - length) {
2629			last_chunk = 1;
2630		} else {
2631			last_chunk = 0;
2632		}
2633		if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2634		    chk_length, net, high_tsn, &abort_flag, &break_flag,
2635		    last_chunk)) {
2636			num_chunks++;
2637#ifdef SCTP_DEBUG
2638			if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2639				printf("Now incr num_chunks to %d\n",
2640				    num_chunks);
2641			}
2642#endif
2643		}
2644		if (abort_flag)
2645			return (2);
2646
2647		if (break_flag) {
2648			/*
2649			 * Set because of out of rwnd space and no drop rep
2650			 * space left.
2651			 */
2652			break;
2653		}
2654
2655		*offset += SCTP_SIZE32(chk_length);
2656		if (*offset >= length) {
2657			/* no more data left in the mbuf chain */
2658			break;
2659		}
2660		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2661		    sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2662		if (ch == NULL) {
2663			*offset = length;
2664			break;
2665		}
2666	} /* while */
2667	if (break_flag) {
2668		/*
2669		 * we need to report rwnd overrun drops.
2670		 */
2671		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2672	}
2673	if (num_chunks) {
2674		/*
2675		 * Did we get data, if so update the time for
2676		 * auto-close and give peer credit for being
2677		 * alive.
2678		 */
2679		sctp_pegs[SCTP_DATA_DG_RECV]++;
2680		stcb->asoc.overall_error_count = 0;
2681		SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2682	}
2683	/* now service all of the reassm queue and delivery queue */
2684	sctp_service_queues(stcb, asoc, 0);
2685	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2686		/*
2687		 * Assure that we ack right away by making
2688		 * sure that a d-ack timer is running. So the
2689		 * sack_check will send a sack.
2690		 */
2691		sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2692		    net);
2693	}
2694	/* Start a sack timer or QUEUE a SACK for sending */
2695	sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2696	if (abort_flag)
2697		return (2);
2698
2699	return (0);
2700}
2701
2702static void
2703sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2704    struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2705    u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2706{
2707	/************************************************/
2708	/* process fragments and update sendqueue        */
2709	/************************************************/
2710	struct sctp_sack *sack;
2711	struct sctp_gap_ack_block *frag;
2712	struct sctp_tmit_chunk *tp1;
2713	int i;
2714	unsigned int j;
2715#ifdef SCTP_FR_LOGGING
2716	int num_frs=0;
2717#endif
2718	uint16_t frag_strt, frag_end, primary_flag_set;
2719	u_long last_frag_high;
2720
2721	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2722		primary_flag_set = 1;
2723	} else {
2724		primary_flag_set = 0;
2725	}
2726
2727	sack = &ch->sack;
2728	frag = (struct sctp_gap_ack_block *)((vaddr_t)sack +
2729	    sizeof(struct sctp_sack));
2730	tp1 = NULL;
2731	last_frag_high = 0;
2732	for (i = 0; i < num_seg; i++) {
2733		frag_strt = ntohs(frag->start);
2734		frag_end = ntohs(frag->end);
2735		/* some sanity checks on the fragment offsets */
2736		if (frag_strt > frag_end) {
2737			/* this one is malformed, skip */
2738			frag++;
2739			continue;
2740		}
2741		if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2742		    MAX_TSN))
2743			*biggest_tsn_acked = frag_end+last_tsn;
2744
2745		/* mark acked dgs and find out the highestTSN being acked */
2746		if (tp1 == NULL) {
2747			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2748
2749			/* save the locations of the last frags */
2750			last_frag_high = frag_end + last_tsn;
2751		} else {
2752			/*
2753			 * now lets see if we need to reset the queue
2754			 * due to a out-of-order SACK fragment
2755			 */
2756			if (compare_with_wrap(frag_strt+last_tsn,
2757			    last_frag_high, MAX_TSN)) {
2758				/*
2759				 * if the new frag starts after the last TSN
2760				 * frag covered, we are ok
2761				 * and this one is beyond the last one
2762				 */
2763				;
2764			} else {
2765				/*
2766				 * ok, they have reset us, so we need to reset
2767				 * the queue this will cause extra hunting but
2768				 * hey, they chose the performance
2769				 * hit when they failed to order there gaps..
2770				 */
2771				tp1 = TAILQ_FIRST(&asoc->sent_queue);
2772			}
2773			last_frag_high = frag_end + last_tsn;
2774		}
2775		for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2776			while (tp1) {
2777#ifdef SCTP_FR_LOGGING
2778				if (tp1->rec.data.doing_fast_retransmit)
2779					num_frs++;
2780#endif
2781
2782				if (tp1->rec.data.TSN_seq == j) {
2783					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2784						/* must be held until cum-ack passes */
2785						/* ECN Nonce: Add the nonce value to the sender's nonce sum */
2786						if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2787							/*
2788							 * If it is less than
2789							 * ACKED, it is now
2790							 * no-longer in flight.
2791							 * Higher values may
2792							 * already be set via
2793							 * previous Gap Ack
2794							 * Blocks...
2795							 * i.e. ACKED or MARKED.
2796							 */
2797							if (compare_with_wrap(tp1->rec.data.TSN_seq,
2798							    *biggest_newly_acked_tsn,
2799							    MAX_TSN)) {
2800								*biggest_newly_acked_tsn =
2801								    tp1->rec.data.TSN_seq;
2802							}
2803							sctp_flight_size_decrease(tp1);
2804
2805							sctp_total_flight_decrease(stcb, tp1);
2806
2807							if (tp1->snd_count < 2) {
2808								/* True non-retransmited chunk */
2809								tp1->whoTo->net_ack2 +=
2810								    tp1->send_size;
2811
2812								/* update RTO too? */
2813								if (tp1->do_rtt) {
2814									tp1->whoTo->RTO =
2815									    sctp_calculate_rto(stcb,
2816									    asoc,
2817									    tp1->whoTo,
2818									    &tp1->sent_rcv_time);
2819									tp1->whoTo->rto_pending = 0;
2820									tp1->do_rtt = 0;
2821								}
2822							}
2823						}
2824						if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2825						    tp1->sent != SCTP_DATAGRAM_UNSENT &&
2826						    compare_with_wrap(tp1->rec.data.TSN_seq,
2827						    asoc->this_sack_highest_gap,
2828						    MAX_TSN)) {
2829							asoc->this_sack_highest_gap =
2830							    tp1->rec.data.TSN_seq;
2831							if (primary_flag_set) {
2832								tp1->whoTo->cacc_saw_newack = 1;
2833							}
2834						}
2835						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2836#ifdef SCTP_DEBUG
2837							if (sctp_debug_on &
2838							    SCTP_DEBUG_INDATA3) {
2839								printf("Hmm. one that is in RESEND that is now ACKED\n");
2840							}
2841#endif
2842							sctp_ucount_decr(asoc->sent_queue_retran_cnt);
2843#ifdef SCTP_AUDITING_ENABLED
2844							sctp_audit_log(0xB2,
2845							    (asoc->sent_queue_retran_cnt & 0x000000ff));
2846#endif
2847
2848						}
2849						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2850						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2851						tp1->sent = SCTP_DATAGRAM_MARKED;
2852					}
2853					break;
2854				} /* if (tp1->TSN_seq == j) */
2855				if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2856				    MAX_TSN))
2857					break;
2858				tp1 = TAILQ_NEXT(tp1, sctp_next);
2859			}/* end while (tp1) */
2860		}  /* end for (j = fragStart */
2861		frag++; /* next one */
2862	}
2863#ifdef SCTP_FR_LOGGING
2864	if (num_frs)
2865		sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2866		    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2867#endif
2868}
2869
2870static void
2871sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2872    u_long biggest_tsn_acked)
2873{
2874	struct sctp_tmit_chunk *tp1;
2875	int tot_revoked=0;
2876
2877	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2878	while (tp1) {
2879		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2880		    MAX_TSN)) {
2881			/*
2882			 * ok this guy is either ACK or MARKED. If it is ACKED
2883			 * it has been previously acked but not this time i.e.
2884			 * revoked.  If it is MARKED it was ACK'ed again.
2885			 */
2886			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2887				/* it has been revoked */
2888				/*
2889				 * We do NOT add back to flight size here since
2890				 * it is really NOT in flight. Resend (when/if
2891				 * it occurs will add to flight size
2892				 */
2893				tp1->sent = SCTP_DATAGRAM_SENT;
2894				tot_revoked++;
2895			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2896				/* it has been re-acked in this SACK */
2897				tp1->sent = SCTP_DATAGRAM_ACKED;
2898			}
2899		}
2900		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2901		    MAX_TSN)) {
2902			/* above the sack */
2903			break;
2904		}
2905		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2906			break;
2907		tp1 = TAILQ_NEXT(tp1, sctp_next);
2908	}
2909	if (tot_revoked > 0) {
2910		/* Setup the ecn nonce re-sync point. We
2911		 * do this since once data is revoked
2912		 * we begin to retransmit things, which
2913		 * do NOT have the ECN bits set. This means
2914		 * we are now out of sync and must wait until
2915		 * we get back in sync with the peer to
2916		 * check ECN bits.
2917		 */
2918		tp1 = TAILQ_FIRST(&asoc->send_queue);
2919		if (tp1 == NULL) {
2920			asoc->nonce_resync_tsn = asoc->sending_seq;
2921		} else {
2922			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2923		}
2924		asoc->nonce_wait_for_ecne = 0;
2925		asoc->nonce_sum_check = 0;
2926	}
2927
2928}
2929
2930extern int sctp_peer_chunk_oh;
2931
2932static void
2933sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2934    u_long biggest_tsn_acked, int strike_enabled,
2935    u_long biggest_tsn_newly_acked, int accum_moved)
2936{
2937	struct sctp_tmit_chunk *tp1;
2938	int strike_flag=0;
2939	struct timeval now;
2940	int tot_retrans=0;
2941	u_int32_t sending_seq;
2942	int primary_switch_active = 0;
2943	int double_switch_active = 0;
2944
2945	/* select the sending_seq, this is
2946	 * either the next thing ready to
2947	 * be sent but not transmitted, OR,
2948	 * the next seq we assign.
2949	 */
2950	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2951	if (tp1 == NULL) {
2952		sending_seq = asoc->sending_seq;
2953	} else {
2954		sending_seq = tp1->rec.data.TSN_seq;
2955	}
2956
2957	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2958		primary_switch_active = 1;
2959	}
2960	if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
2961		double_switch_active = 1;
2962	}
2963	if (stcb->asoc.peer_supports_prsctp ) {
2964		SCTP_GETTIME_TIMEVAL(&now);
2965	}
2966	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2967	while (tp1) {
2968		strike_flag=0;
2969		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2970		    MAX_TSN) ||
2971		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2972			/* done */
2973			break;
2974		}
2975		if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
2976		    SCTP_PR_SCTP_ENABLED &&
2977		    tp1->sent < SCTP_DATAGRAM_ACKED) {
2978			/* Is it expired? */
2979#ifndef __FreeBSD__
2980			if (timercmp(&now, &tp1->rec.data.timetodrop, >))
2981#else
2982			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
2983#endif
2984			{
2985				/* Yes so drop it */
2986				if (tp1->data != NULL) {
2987					sctp_release_pr_sctp_chunk(stcb, tp1,
2988					    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
2989					    &asoc->sent_queue);
2990				}
2991				tp1 = TAILQ_NEXT(tp1, sctp_next);
2992				continue;
2993			}
2994		}
2995
2996		if (compare_with_wrap(tp1->rec.data.TSN_seq,
2997		    asoc->this_sack_highest_gap, MAX_TSN)) {
2998			/* we are beyond the tsn in the sack  */
2999 			break;
3000		}
3001		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3002			/* either a RESEND, ACKED, or MARKED */
3003			/* skip */
3004			tp1 = TAILQ_NEXT(tp1, sctp_next);
3005			continue;
3006		}
3007		if (primary_switch_active && (strike_enabled == 0)) {
3008			if (tp1->whoTo != asoc->primary_destination) {
3009				/*
3010				 * We can only strike things on the primary if
3011				 * the strike_enabled flag is clear
3012				 */
3013				tp1 = TAILQ_NEXT(tp1, sctp_next);
3014				continue;
3015			}
3016		} else if (primary_switch_active) {
3017			if (tp1->whoTo->cacc_saw_newack == 0) {
3018				/*
3019				 * Only one was received but it was NOT
3020				 * this one.
3021				 */
3022				tp1 = TAILQ_NEXT(tp1, sctp_next);
3023				continue;
3024			}
3025		}
3026		if (double_switch_active &&
3027		    (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3028		    tp1->rec.data.TSN_seq, MAX_TSN))) {
3029			/*
3030			 * With a double switch we do NOT mark unless we
3031			 * are beyond the switch point.
3032			 */
3033			tp1 = TAILQ_NEXT(tp1, sctp_next);
3034			continue;
3035		}
3036		/*
3037		 * Here we check to see if we were have already done a FR
3038		 * and if so we see if the biggest TSN we saw in the sack is
3039		 * smaller than the recovery point. If so we don't strike the
3040		 * tsn... otherwise we CAN strike the TSN.
3041		 */
3042		if (accum_moved && asoc->fast_retran_loss_recovery) {
3043		 	/*
3044		 	 * Strike the TSN if in fast-recovery and
3045			 * cum-ack moved.
3046			 */
3047			tp1->sent++;
3048		} else if (tp1->rec.data.doing_fast_retransmit) {
3049			/*
3050			 * For those that have done a FR we must
3051			 * take special consideration if we strike. I.e
3052			 * the biggest_newly_acked must be higher
3053			 * than the sending_seq at the time we did
3054			 * the FR.
3055			 */
3056#ifdef SCTP_FR_TO_ALTERNATE
3057			/*
3058			 * If FR's go to new networks, then we
3059			 * must only do this for singly homed asoc's. However
3060			 * if the FR's go to the same network (Armando's work)
3061			 * then its ok to FR multiple times.
3062			 */
3063			if (asoc->numnets < 2)
3064#else
3065			if (1)
3066#endif
3067			{
3068				if ((compare_with_wrap(biggest_tsn_newly_acked,
3069				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3070				    (biggest_tsn_newly_acked ==
3071				     tp1->rec.data.fast_retran_tsn)) {
3072					/*
3073					 * Strike the TSN, since this ack is
3074					 * beyond where things were when we did
3075					 * a FR.
3076					 */
3077#ifdef SCTP_FR_LOGGING
3078					sctp_log_fr(biggest_tsn_newly_acked,
3079					    tp1->rec.data.TSN_seq,
3080					    tp1->rec.data.fast_retran_tsn,
3081					    SCTP_FR_LOG_STRIKE_CHUNK);
3082#endif
3083					tp1->sent++;
3084					strike_flag=1;
3085				}
3086			}
3087 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3088 		    biggest_tsn_newly_acked, MAX_TSN)) {
3089			/*
3090			 * We don't strike these:
3091			 * This is the  HTNA algorithm i.e. we don't strike
3092			 * If our TSN is larger than the Highest TSN Newly
3093			 * Acked.
3094			 */
3095			;
3096	 	} else {
3097		 	/* Strike the TSN */
3098			tp1->sent++;
3099		}
3100		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3101			/* Increment the count to resend */
3102			struct sctp_nets *alt;
3103
3104#ifdef SCTP_FR_LOGGING
3105			sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3106			    0, SCTP_FR_MARKED);
3107#endif
3108			if (strike_flag) {
3109				/* This is a subsequent FR */
3110				sctp_pegs[SCTP_DUP_FR]++;
3111			}
3112			asoc->sent_queue_retran_cnt++;
3113#ifdef SCTP_FR_TO_ALTERNATE
3114			/* Can we find an alternate? */
3115			alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3116#else
3117			/*
3118			 * default behavior is to NOT retransmit FR's
3119			 * to an alternate. Armando Caro's paper details
3120			 * why.
3121			 */
3122			alt = tp1->whoTo;
3123#endif
3124			tp1->rec.data.doing_fast_retransmit = 1;
3125			tot_retrans++;
3126			/* mark the sending seq for possible subsequent FR's */
3127			if (TAILQ_EMPTY(&asoc->send_queue)) {
3128				/*
3129				 * If the queue of send is empty then its the
3130				 * next sequence number that will be assigned so
3131				 * we subtract one from this to get the one we
3132				 * last sent.
3133				 */
3134 				tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3135			} else {
3136				/*
3137			 	 * If there are chunks on the send queue
3138				 * (unsent data that has made it from the
3139				 * stream queues but not out the door, we take
3140				 * the first one (which will have the lowest
3141				 * TSN) and subtract one to get the one we last
3142				 * sent.
3143				 */
3144				struct sctp_tmit_chunk *ttt;
3145				ttt = TAILQ_FIRST(&asoc->send_queue);
3146				tp1->rec.data.fast_retran_tsn =
3147				    ttt->rec.data.TSN_seq - 1;
3148			}
3149			if (tp1->do_rtt) {
3150				/*
3151				 * this guy had a RTO calculation pending on it,
3152				 * cancel it
3153				 */
3154				tp1->whoTo->rto_pending = 0;
3155				tp1->do_rtt = 0;
3156			}
3157			/* fix counts and things */
3158
3159			tp1->whoTo->net_ack++;
3160			sctp_flight_size_decrease(tp1);
3161#ifdef SCTP_LOG_RWND
3162			sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3163				      asoc->peers_rwnd , tp1->send_size, sctp_peer_chunk_oh);
3164#endif
3165			/* add back to the rwnd */
3166			asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3167
3168			/* remove from the total flight */
3169			sctp_total_flight_decrease(stcb, tp1);
3170			if (alt != tp1->whoTo) {
3171				/* yes, there is an alternate. */
3172				sctp_free_remote_addr(tp1->whoTo);
3173				tp1->whoTo = alt;
3174				alt->ref_count++;
3175			}
3176		}
3177		tp1 = TAILQ_NEXT(tp1, sctp_next);
3178	} /* while (tp1) */
3179
3180	if (tot_retrans > 0) {
3181		/* Setup the ecn nonce re-sync point. We
3182		 * do this since once we go to FR something
3183		 * we introduce a Karn's rule scenario and
3184		 * won't know the totals for the ECN bits.
3185		 */
3186		asoc->nonce_resync_tsn = sending_seq;
3187		asoc->nonce_wait_for_ecne = 0;
3188		asoc->nonce_sum_check = 0;
3189	}
3190
3191}
3192
3193struct sctp_tmit_chunk *
3194sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3195    struct sctp_association *asoc)
3196{
3197	struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
3198	struct timeval now;
3199	int now_filled=0;
3200
3201	if (asoc->peer_supports_prsctp == 0) {
3202		return (NULL);
3203	}
3204	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3205	while (tp1) {
3206		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3207		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3208			/* no chance to advance, out of here */
3209			break;
3210		}
3211		if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3212			/*
3213			 * We can't fwd-tsn past any that are reliable
3214			 * aka retransmitted until the asoc fails.
3215			 */
3216			break;
3217		}
3218		if (!now_filled) {
3219			SCTP_GETTIME_TIMEVAL(&now);
3220			now_filled = 1;
3221		}
3222		tp2 = TAILQ_NEXT(tp1, sctp_next);
3223		/*
3224		 * now we got a chunk which is marked for another
3225		 * retransmission to a PR-stream but has run
3226		 * out its chances already maybe OR has been
3227		 * marked to skip now. Can we skip it if its a
3228		 * resend?
3229		 */
3230		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3231		    (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3232			/*
3233			 * Now is this one marked for resend and its time
3234			 * is now up?
3235			 */
3236#ifndef __FreeBSD__
3237			if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3238#else
3239			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3240#endif
3241			{
3242				/* Yes so drop it */
3243				if (tp1->data) {
3244					sctp_release_pr_sctp_chunk(stcb, tp1,
3245					    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3246					    &asoc->sent_queue);
3247				}
3248			} else {
3249				/*
3250				 * No, we are done when hit one for resend whos
3251				 * time as not expired.
3252				 */
3253				break;
3254			}
3255		}
3256		/*
3257		 * Ok now if this chunk is marked to drop it
3258		 * we can clean up the chunk, advance our peer ack point
3259		 * and we can check the next chunk.
3260		 */
3261		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3262			/* advance PeerAckPoint goes forward */
3263			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3264			a_adv = tp1;
3265			/*
3266			 * we don't want to de-queue it here. Just wait for the
3267			 * next peer SACK to come with a new cumTSN and then
3268			 * the chunk will be dropped in the normal fashion.
3269			 */
3270			if (tp1->data) {
3271				sctp_free_bufspace(stcb, asoc, tp1);
3272#ifdef SCTP_DEBUG
3273				if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3274					printf("--total out:%lu total_mbuf_out:%lu\n",
3275					    (u_long)asoc->total_output_queue_size,
3276					    (u_long)asoc->total_output_mbuf_queue_size);
3277				}
3278#endif
3279				/*
3280				 * Maybe there should be another notification
3281				 * type
3282				 */
3283				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3284				    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3285				    tp1);
3286				sctp_m_freem(tp1->data);
3287				tp1->data = NULL;
3288				sctp_sowwakeup(stcb->sctp_ep,
3289				    stcb->sctp_socket);
3290			}
3291		} else {
3292			/* If it is still in RESEND we can advance no further */
3293			break;
3294		}
3295		/*
3296		 * If we hit here we just dumped tp1, move to next
3297		 * tsn on sent queue.
3298		 */
3299		tp1 = tp2;
3300	}
3301	return (a_adv);
3302}
3303
3304#ifdef SCTP_HIGH_SPEED
3305struct sctp_hs_raise_drop {
3306	int32_t cwnd;
3307	int32_t increase;
3308	int32_t drop_percent;
3309};
3310
3311#define SCTP_HS_TABLE_SIZE 73
3312
3313struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3314	{38,1,50},	/* 0   */
3315	{118,2,44},	/* 1   */
3316	{221,3,41},	/* 2   */
3317	{347,4,38},	/* 3   */
3318	{495,5,37},	/* 4   */
3319	{663,6,35},	/* 5   */
3320	{851,7,34},	/* 6   */
3321	{1058,8,33},	/* 7   */
3322	{1284,9,32},	/* 8   */
3323	{1529,10,31},	/* 9   */
3324	{1793,11,30},	/* 10  */
3325	{2076,12,29},	/* 11  */
3326	{2378,13,28},	/* 12  */
3327	{2699,14,28},	/* 13  */
3328	{3039,15,27},	/* 14  */
3329	{3399,16,27},	/* 15  */
3330	{3778,17,26},	/* 16  */
3331	{4177,18,26},	/* 17  */
3332	{4596,19,25},	/* 18  */
3333	{5036,20,25},	/* 19  */
3334	{5497,21,24},	/* 20  */
3335	{5979,22,24},	/* 21  */
3336	{6483,23,23},	/* 22  */
3337	{7009,24,23},	/* 23  */
3338	{7558,25,22},	/* 24  */
3339	{8130,26,22},	/* 25  */
3340	{8726,27,22},	/* 26  */
3341	{9346,28,21},	/* 27  */
3342	{9991,29,21},	/* 28  */
3343	{10661,30,21},  /* 29  */
3344	{11358,31,20},  /* 30  */
3345	{12082,32,20},  /* 31  */
3346	{12834,33,20},  /* 32  */
3347	{13614,34,19},  /* 33  */
3348	{14424,35,19},  /* 34  */
3349	{15265,36,19},  /* 35  */
3350	{16137,37,19},  /* 36  */
3351	{17042,38,18},  /* 37  */
3352	{17981,39,18},  /* 38  */
3353	{18955,40,18},  /* 39  */
3354	{19965,41,17},  /* 40  */
3355	{21013,42,17},  /* 41  */
3356	{22101,43,17},  /* 42  */
3357	{23230,44,17},  /* 43  */
3358	{24402,45,16},  /* 44  */
3359	{25618,46,16},  /* 45  */
3360	{26881,47,16},  /* 46  */
3361	{28193,48,16},  /* 47  */
3362	{29557,49,15},  /* 48  */
3363	{30975,50,15},  /* 49  */
3364	{32450,51,15},  /* 50  */
3365	{33986,52,15},  /* 51  */
3366	{35586,53,14},  /* 52  */
3367	{37253,54,14},  /* 53  */
3368	{38992,55,14},  /* 54  */
3369	{40808,56,14},  /* 55  */
3370	{42707,57,13},  /* 56  */
3371	{44694,58,13},  /* 57  */
3372	{46776,59,13},  /* 58  */
3373	{48961,60,13},  /* 59  */
3374	{51258,61,13},  /* 60  */
3375	{53677,62,12},  /* 61  */
3376	{56230,63,12},  /* 62  */
3377	{58932,64,12},  /* 63  */
3378	{61799,65,12},  /* 64  */
3379	{64851,66,11},  /* 65  */
3380	{68113,67,11},  /* 66  */
3381	{71617,68,11},  /* 67  */
3382	{75401,69,10},  /* 68  */
3383	{79517,70,10},  /* 69  */
3384	{84035,71,10},  /* 70  */
3385	{89053,72,10},  /* 71  */
3386	{94717,73,9}    /* 72  */
3387};
3388
3389static void
3390sctp_hs_cwnd_increase(struct sctp_nets *net)
3391{
3392 	int cur_val, i, indx, incr;
3393
3394	cur_val = net->cwnd >> 10;
3395	indx = SCTP_HS_TABLE_SIZE - 1;
3396
3397	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3398		/* normal mode */
3399		if (net->net_ack > net->mtu) {
3400			net->cwnd += net->mtu;
3401#ifdef SCTP_CWND_LOGGING
3402			sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3403#endif
3404		} else {
3405			net->cwnd += net->net_ack;
3406#ifdef SCTP_CWND_LOGGING
3407			sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3408#endif
3409		}
3410	} else {
3411		for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) {
3412			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3413				indx = i;
3414				break;
3415			}
3416		}
3417		net->last_hs_used = indx;
3418		incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3419		net->cwnd += incr;
3420#ifdef SCTP_CWND_LOGGING
3421		sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS);
3422#endif
3423	}
3424}
3425
3426static void
3427sctp_hs_cwnd_decrease(struct sctp_nets *net)
3428{
3429 	int cur_val, i, indx;
3430#ifdef SCTP_CWND_LOGGING
3431	int old_cwnd = net->cwnd;
3432#endif
3433
3434	cur_val = net->cwnd >> 10;
3435	indx = net->last_hs_used;
3436	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3437		/* normal mode */
3438		net->ssthresh = net->cwnd / 2;
3439		if (net->ssthresh < (net->mtu*2)) {
3440			net->ssthresh = 2 * net->mtu;
3441		}
3442		net->cwnd = net->ssthresh;
3443#ifdef SCTP_CWND_LOGGING
3444		sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR);
3445#endif
3446	} else {
3447		/* drop by the proper amount */
3448		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3449		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3450		net->cwnd = net->ssthresh;
3451		/* now where are we */
3452		indx = net->last_hs_used;
3453		cur_val = net->cwnd >> 10;
3454		/* reset where we are in the table */
3455		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3456			/* feel out of hs */
3457			net->last_hs_used = 0;
3458		} else {
3459			for (i = indx; i >= 1; i--) {
3460				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3461					break;
3462				}
3463			}
3464			net->last_hs_used = indx;
3465		}
3466	}
3467}
3468#endif
3469
3470void
3471sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
3472    struct sctp_nets *net_from, int *abort_now)
3473{
3474	struct sctp_association *asoc;
3475	struct sctp_sack *sack;
3476	struct sctp_tmit_chunk *tp1, *tp2;
3477	u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked;
3478	uint16_t num_seg;
3479	unsigned int sack_length;
3480	uint32_t send_s;
3481	int some_on_streamwheel;
3482	int strike_enabled = 0, cnt_of_cacc = 0;
3483	int accum_moved = 0;
3484	int marking_allowed = 1;
3485	int will_exit_fast_recovery=0;
3486	u_int32_t a_rwnd;
3487	struct sctp_nets *net = NULL;
3488	int nonce_sum_flag, ecn_seg_sums=0;
3489	asoc = &stcb->asoc;
3490
3491	/*
3492	 * Handle the incoming sack on data I have been sending.
3493	 */
3494
3495	/*
3496	 * we take any chance we can to service our queues since we
3497	 * cannot get awoken when the socket is read from :<
3498	 */
3499	asoc->overall_error_count = 0;
3500
3501	if (asoc->sent_queue_retran_cnt) {
3502#ifdef SCTP_DEBUG
3503		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3504			printf("Handling SACK for asoc:%p retran:%d\n",
3505			       asoc, asoc->sent_queue_retran_cnt);
3506		}
3507#endif
3508	}
3509
3510	sctp_service_queues(stcb, asoc, 0);
3511
3512	/*
3513	 * Now perform the actual SACK handling:
3514	 * 1) Verify that it is not an old sack, if so discard.
3515	 * 2) If there is nothing left in the send queue (cum-ack is equal
3516	 *    to last acked) then you have a duplicate too, update any rwnd
3517	 *    change and verify no timers are running. then return.
3518	 * 3) Process any new consequtive data i.e. cum-ack moved
3519	 *    process these first and note that it moved.
3520	 * 4) Process any sack blocks.
3521	 * 5) Drop any acked from the queue.
3522	 * 6) Check for any revoked blocks and mark.
3523	 * 7) Update the cwnd.
3524	 * 8) Nothing left, sync up flightsizes and things, stop all timers
3525	 *    and also check for shutdown_pending state. If so then go ahead
3526	 *    and send off the shutdown. If in shutdown recv, send off the
3527	 *    shutdown-ack and start that timer, Ret.
3528	 * 9) Strike any non-acked things and do FR procedure if needed being
3529	 *    sure to set the FR flag.
3530	 * 10) Do pr-sctp procedures.
3531	 * 11) Apply any FR penalties.
3532	 * 12) Assure we will SACK if in shutdown_recv state.
3533	 */
3534
3535	sack_length = ntohs(ch->ch.chunk_length);
3536	if (sack_length < sizeof(struct sctp_sack_chunk)) {
3537#ifdef SCTP_DEBUG
3538		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3539			printf("Bad size on sack chunk .. to small\n");
3540		}
3541#endif
3542		return;
3543	}
3544	/* ECN Nonce */
3545	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3546	sack = &ch->sack;
3547	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3548	num_seg = ntohs(sack->num_gap_ack_blks);
3549
3550	/* reality check */
3551	if (TAILQ_EMPTY(&asoc->send_queue)) {
3552		send_s = asoc->sending_seq;
3553	} else {
3554		tp1 = TAILQ_FIRST(&asoc->send_queue);
3555		send_s = tp1->rec.data.TSN_seq;
3556	}
3557
3558	if (sctp_strict_sacks) {
3559		if (cum_ack == send_s ||
3560		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3561			struct mbuf *oper;
3562			/*
3563			 * no way, we have not even sent this TSN out yet.
3564			 * Peer is hopelessly messed up with us.
3565			 */
3566		hopeless_peer:
3567			*abort_now = 1;
3568			/* XXX */
3569			MGET(oper, M_DONTWAIT, MT_DATA);
3570			if (oper) {
3571				struct sctp_paramhdr *ph;
3572				u_int32_t *ippp;
3573
3574				oper->m_len = sizeof(struct sctp_paramhdr) +
3575					sizeof(*ippp);
3576				ph = mtod(oper, struct sctp_paramhdr *);
3577				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3578				ph->param_length = htons(oper->m_len);
3579				ippp = (u_int32_t *)(ph + 1);
3580				*ippp = htonl(0x30000002);
3581			}
3582			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3583			return;
3584		}
3585	}
3586	/* update the Rwnd of the peer */
3587	a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3588	if (asoc->sent_queue_retran_cnt) {
3589#ifdef SCTP_DEBUG
3590		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3591			printf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3592			       cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3593		}
3594#endif
3595	}
3596	if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3597		/* we are not allowed to mark for FR */
3598		marking_allowed = 0;
3599	}
3600	/**********************/
3601	/* 1) check the range */
3602	/**********************/
3603	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3604		/* acking something behind */
3605		if (asoc->sent_queue_retran_cnt) {
3606#ifdef SCTP_DEBUG
3607			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3608				printf("The cum-ack is behind us\n");
3609			}
3610#endif
3611		}
3612		return;
3613	}
3614
3615	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3616		/* nothing left on sendqueue.. consider done */
3617#ifdef SCTP_LOG_RWND
3618		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3619				  asoc->peers_rwnd, 0, 0, a_rwnd);
3620#endif
3621		asoc->peers_rwnd = a_rwnd;
3622		if (asoc->sent_queue_retran_cnt) {
3623#ifdef SCTP_DEBUG
3624			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3625				printf("Huh? retran set but none on queue\n");
3626			}
3627#endif
3628			asoc->sent_queue_retran_cnt = 0;
3629		}
3630		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3631			/* SWS sender side engages */
3632			asoc->peers_rwnd = 0;
3633		}
3634		/* stop any timers */
3635		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3636			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3637					stcb, net);
3638			net->partial_bytes_acked = 0;
3639			net->flight_size = 0;
3640		}
3641		asoc->total_flight = 0;
3642		asoc->total_flight_count = 0;
3643		return;
3644	}
3645	/*
3646	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
3647	 * things. The total byte count acked is tracked in netAckSz AND
3648	 * netAck2 is used to track the total bytes acked that are un-
3649	 * amibguious and were never retransmitted. We track these on a
3650	 * per destination address basis.
3651	 */
3652	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3653		net->prev_cwnd = net->cwnd;
3654		net->net_ack = 0;
3655		net->net_ack2 = 0;
3656	}
3657	/* process the new consecutive TSN first */
3658	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3659	while (tp1) {
3660		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
3661				      MAX_TSN) ||
3662		    last_tsn == tp1->rec.data.TSN_seq) {
3663			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3664				/* ECN Nonce: Add the nonce to the sender's nonce sum */
3665				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3666				accum_moved = 1;
3667				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3668					/*
3669					 * If it is less than ACKED, it is now
3670					 * no-longer in flight. Higher values
3671					 * may occur during marking
3672					 */
3673					if ((tp1->whoTo->dest_state &
3674					     SCTP_ADDR_UNCONFIRMED) &&
3675					    (tp1->snd_count < 2) ) {
3676						/*
3677						 * If there was no retran and
3678						 * the address is un-confirmed
3679						 * and we sent there and are
3680						 * now sacked.. its confirmed,
3681						 * mark it so.
3682						 */
3683						tp1->whoTo->dest_state &=
3684							~SCTP_ADDR_UNCONFIRMED;
3685					}
3686					sctp_flight_size_decrease(tp1);
3687					sctp_total_flight_decrease(stcb, tp1);
3688					tp1->whoTo->net_ack += tp1->send_size;
3689					if (tp1->snd_count < 2) {
3690						/* True non-retransmited chunk */
3691						tp1->whoTo->net_ack2 +=
3692							tp1->send_size;
3693						/* update RTO too? */
3694						if (tp1->do_rtt) {
3695							tp1->whoTo->RTO =
3696								sctp_calculate_rto(stcb,
3697										   asoc, tp1->whoTo,
3698										   &tp1->sent_rcv_time);
3699							tp1->whoTo->rto_pending = 0;
3700							tp1->do_rtt = 0;
3701						}
3702					}
3703				}
3704				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3705#ifdef SCTP_DEBUG
3706					if (sctp_debug_on & SCTP_DEBUG_INDATA3) {
3707						printf("Hmm. one that is in RESEND that is now ACKED\n");
3708					}
3709#endif
3710					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3711#ifdef SCTP_AUDITING_ENABLED
3712					sctp_audit_log(0xB3,
3713						       (asoc->sent_queue_retran_cnt & 0x000000ff));
3714#endif
3715
3716				}
3717				tp1->sent = SCTP_DATAGRAM_ACKED;
3718			}
3719		} else {
3720			break;
3721		}
3722		tp1 = TAILQ_NEXT(tp1, sctp_next);
3723	}
3724	/*******************************************/
3725	/* cancel ALL T3-send timer if accum moved */
3726	/*******************************************/
3727	if (accum_moved) {
3728		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3729			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3730					stcb, net);
3731		}
3732	}
3733	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
3734	/* always set this up to cum-ack */
3735	asoc->this_sack_highest_gap = last_tsn;
3736
3737	if (num_seg * sizeof(struct sctp_gap_ack_block) + sizeof(struct sctp_sack_chunk) > sack_length) {
3738		/* skip corrupt segments */
3739		strike_enabled = 0;
3740		goto skip_segments;
3741	}
3742
3743	if (num_seg > 0) {
3744		if (asoc->primary_destination->dest_state &
3745		    SCTP_ADDR_SWITCH_PRIMARY) {
3746			/* clear the nets CACC flags */
3747			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3748				net->cacc_saw_newack = 0;
3749			}
3750		}
3751		/*
3752		 * thisSackHighestGap will increase while handling NEW segments
3753		 */
3754
3755		sctp_handle_segments(stcb, asoc, ch, last_tsn,
3756		    &biggest_tsn_acked, &biggest_tsn_newly_acked,
3757		    num_seg, &ecn_seg_sums);
3758
3759		if (sctp_strict_sacks) {
3760			/* validate the biggest_tsn_acked in the gap acks
3761			 * if strict adherence is wanted.
3762			 */
3763			if ((biggest_tsn_acked == send_s) ||
3764			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
3765				/*
3766				 * peer is either confused or we are under
3767				 * attack. We must abort.
3768				 */
3769				goto hopeless_peer;
3770			}
3771		}
3772
3773		if (asoc->primary_destination->dest_state &
3774		    SCTP_ADDR_SWITCH_PRIMARY) {
3775			/* clear the nets CACC flags */
3776			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3777				if (net->cacc_saw_newack) {
3778					cnt_of_cacc++;
3779				}
3780			}
3781		}
3782
3783	}
3784
3785	if (cnt_of_cacc < 2) {
3786		strike_enabled = 1;
3787	} else {
3788		strike_enabled = 0;
3789	}
3790 skip_segments:
3791	/********************************************/
3792	/* drop the acked chunks from the sendqueue */
3793	/********************************************/
3794	asoc->last_acked_seq = cum_ack;
3795	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3796		if ((cum_ack == asoc->primary_destination->next_tsn_at_change) ||
3797		    (compare_with_wrap(cum_ack,
3798				       asoc->primary_destination->next_tsn_at_change, MAX_TSN))) {
3799			struct sctp_nets *lnet;
3800			/* Turn off the switch flag for ALL addresses */
3801			TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3802				asoc->primary_destination->dest_state &=
3803					~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH);
3804			}
3805		}
3806	}
3807	/* Drag along the t3 timeout point so we don't have a problem at wrap */
3808	if (marking_allowed) {
3809		asoc->t3timeout_highest_marked = cum_ack;
3810	}
3811	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3812	do {
3813		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
3814				      MAX_TSN)) {
3815			break;
3816		}
3817		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3818			/* no more sent on list */
3819			break;
3820		}
3821		tp2 = TAILQ_NEXT(tp1, sctp_next);
3822		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3823		if (tp1->data) {
3824			sctp_free_bufspace(stcb, asoc, tp1);
3825#ifdef SCTP_DEBUG
3826			if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3827				printf("--total out:%lu total_mbuf_out:%lu\n",
3828				       (u_long)asoc->total_output_queue_size,
3829				       (u_long)asoc->total_output_mbuf_queue_size);
3830			}
3831#endif
3832
3833			sctp_m_freem(tp1->data);
3834			if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3835				asoc->sent_queue_cnt_removeable--;
3836			}
3837
3838		}
3839		tp1->data = NULL;
3840		asoc->sent_queue_cnt--;
3841		sctp_free_remote_addr(tp1->whoTo);
3842		sctppcbinfo.ipi_count_chunk--;
3843		asoc->chunks_on_out_queue--;
3844
3845		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3846			panic("Chunk count is going negative");
3847		}
3848		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1);
3849		sctppcbinfo.ipi_gencnt_chunk++;
3850		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3851		tp1 = tp2;
3852	} while (tp1 != NULL);
3853
3854
3855	if (asoc->fast_retran_loss_recovery && accum_moved) {
3856		if (compare_with_wrap(asoc->last_acked_seq,
3857				      asoc->fast_recovery_tsn, MAX_TSN) ||
3858		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
3859			/* Setup so we will exit RFC2582 fast recovery */
3860			will_exit_fast_recovery = 1;
3861		}
3862	}
3863
3864	/* Check for revoked fragments if we hand
3865	 * fragments in a previous segment. If we
3866	 * had no previous fragments we cannot have
3867	 * a revoke issue.
3868	 */
3869	if (asoc->saw_sack_with_frags)
3870		sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
3871
3872	if (num_seg)
3873		asoc->saw_sack_with_frags = 1;
3874	else
3875		asoc->saw_sack_with_frags = 0;
3876
3877	/******************************/
3878	/* update cwnd                */
3879	/******************************/
3880	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3881		/* if nothing was acked on this destination skip it */
3882		if (net->net_ack == 0)
3883			continue;
3884
3885		if (net->net_ack2 > 0) {
3886			/*
3887			 * Karn's rule applies to clearing error count,
3888			 * this is optional.
3889			 */
3890			net->error_count = 0;
3891			if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) ==
3892			    SCTP_ADDR_NOT_REACHABLE) {
3893				/* addr came good */
3894				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3895				net->dest_state |= SCTP_ADDR_REACHABLE;
3896				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3897						SCTP_RECEIVED_SACK, (void *)net);
3898				/* now was it the primary? if so restore */
3899				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3900					sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3901				}
3902			}
3903		}
3904
3905		if (asoc->fast_retran_loss_recovery &&
3906		    will_exit_fast_recovery == 0) {
3907			/* If we are in loss recovery we skip any cwnd update */
3908			sctp_pegs[SCTP_CWND_SKIP]++;
3909			goto skip_cwnd_update;
3910		}
3911		if (accum_moved) {
3912			/* If the cumulative ack moved we can proceed */
3913			if (net->cwnd <= net->ssthresh) {
3914				/* We are in slow start */
3915				if (net->flight_size + net->net_ack >=
3916				    net->cwnd ) {
3917#ifdef SCTP_HIGH_SPEED
3918					sctp_hs_cwnd_increase(net);
3919#else
3920					if (net->net_ack > net->mtu) {
3921						net->cwnd += net->mtu;
3922#ifdef SCTP_CWND_LOGGING
3923						sctp_log_cwnd(net, net->mtu,
3924							      SCTP_CWND_LOG_FROM_SS);
3925#endif
3926
3927					} else {
3928						net->cwnd += net->net_ack;
3929#ifdef SCTP_CWND_LOGGING
3930						sctp_log_cwnd(net, net->net_ack,
3931							      SCTP_CWND_LOG_FROM_SS);
3932#endif
3933
3934					}
3935#endif
3936					sctp_pegs[SCTP_CWND_SS]++;
3937				} else {
3938					unsigned int dif;
3939					sctp_pegs[SCTP_CWND_NOUSE_SS]++;
3940					dif = net->cwnd - (net->flight_size +
3941							   net->net_ack);
3942#ifdef SCTP_CWND_LOGGING
3943/*					sctp_log_cwnd(net, net->net_ack,
3944					SCTP_CWND_LOG_NOADV_SS);*/
3945#endif
3946					if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) {
3947						sctp_pegs[SCTP_CWND_DIFF_SA] =
3948							dif;
3949						sctp_pegs[SCTP_OQS_AT_SS] =
3950							asoc->total_output_queue_size;
3951						sctp_pegs[SCTP_SQQ_AT_SS] =
3952							asoc->sent_queue_cnt;
3953						sctp_pegs[SCTP_SQC_AT_SS] =
3954							asoc->send_queue_cnt;
3955					}
3956				}
3957			} else {
3958				/* We are in congestion avoidance */
3959				if (net->flight_size + net->net_ack >=
3960				    net->cwnd) {
3961					/*
3962					 * add to pba only if we had a cwnd's
3963					 * worth (or so) in flight OR the
3964					 * burst limit was applied.
3965					 */
3966					net->partial_bytes_acked +=
3967						net->net_ack;
3968
3969					/*
3970					 * Do we need to increase
3971					 * (if pba is > cwnd)?
3972					 */
3973					if (net->partial_bytes_acked >=
3974					    net->cwnd) {
3975						if (net->cwnd <
3976						    net->partial_bytes_acked) {
3977							net->partial_bytes_acked -=
3978								net->cwnd;
3979						} else {
3980							net->partial_bytes_acked =
3981								0;
3982						}
3983						net->cwnd += net->mtu;
3984#ifdef SCTP_CWND_LOGGING
3985						sctp_log_cwnd(net, net->mtu,
3986							      SCTP_CWND_LOG_FROM_CA);
3987#endif
3988						sctp_pegs[SCTP_CWND_CA]++;
3989					}
3990				} else {
3991					unsigned int dif;
3992					sctp_pegs[SCTP_CWND_NOUSE_CA]++;
3993#ifdef SCTP_CWND_LOGGING
3994/*					sctp_log_cwnd(net, net->net_ack,
3995					SCTP_CWND_LOG_NOADV_CA);
3996*/
3997#endif
3998					dif = net->cwnd - (net->flight_size +
3999							   net->net_ack);
4000					if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) {
4001						sctp_pegs[SCTP_CWND_DIFF_CA] =
4002							dif;
4003						sctp_pegs[SCTP_OQS_AT_CA] =
4004							asoc->total_output_queue_size;
4005						sctp_pegs[SCTP_SQQ_AT_CA] =
4006							asoc->sent_queue_cnt;
4007						sctp_pegs[SCTP_SQC_AT_CA] =
4008							asoc->send_queue_cnt;
4009
4010					}
4011
4012				}
4013			}
4014		} else {
4015			sctp_pegs[SCTP_CWND_NOCUM]++;
4016		}
4017	skip_cwnd_update:
4018		/*
4019		 * NOW, according to Karn's rule do we need to restore the
4020		 * RTO timer back? Check our net_ack2. If not set then we
4021		 * have a ambiguity.. i.e. all data ack'd was sent to more
4022		 * than one place.
4023		 */
4024
4025		if (net->net_ack2) {
4026			/* restore any doubled timers */
4027			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4028			if (net->RTO < stcb->asoc.minrto) {
4029				net->RTO = stcb->asoc.minrto;
4030			}
4031			if (net->RTO > stcb->asoc.maxrto) {
4032				net->RTO = stcb->asoc.maxrto;
4033			}
4034		}
4035		if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) {
4036			sctp_pegs[SCTP_MAX_CWND] = net->cwnd;
4037		}
4038	}
4039	/**********************************/
4040	/* Now what about shutdown issues */
4041	/**********************************/
4042	some_on_streamwheel = 0;
4043	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4044		/* Check to see if some data queued */
4045		struct sctp_stream_out *outs;
4046		TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4047			if (!TAILQ_EMPTY(&outs->outqueue)) {
4048				some_on_streamwheel = 1;
4049				break;
4050			}
4051		}
4052	}
4053	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) &&
4054	    some_on_streamwheel == 0) {
4055		/* nothing left on sendqueue.. consider done */
4056		/* stop all timers */
4057#ifdef SCTP_LOG_RWND
4058		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4059				  asoc->peers_rwnd, 0, 0,  a_rwnd);
4060#endif
4061		asoc->peers_rwnd = a_rwnd;
4062		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4063			/* SWS sender side engages */
4064			asoc->peers_rwnd = 0;
4065		}
4066		/* stop any timers */
4067		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4068			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4069					stcb, net);
4070			net->flight_size = 0;
4071			net->partial_bytes_acked = 0;
4072		}
4073		asoc->total_flight = 0;
4074		asoc->total_flight_count = 0;
4075		/* clean up */
4076		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
4077			asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4078#ifdef SCTP_DEBUG
4079			if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4080				printf("%s:%d sends a shutdown\n",
4081				       __FILE__,
4082				       __LINE__
4083				       );
4084			}
4085#endif
4086			sctp_send_shutdown(stcb,
4087					   stcb->asoc.primary_destination);
4088			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4089					 stcb->sctp_ep, stcb, asoc->primary_destination);
4090			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4091					 stcb->sctp_ep, stcb, asoc->primary_destination);
4092		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) {
4093			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4094
4095			sctp_send_shutdown_ack(stcb,
4096					       stcb->asoc.primary_destination);
4097
4098			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4099					 stcb->sctp_ep, stcb, asoc->primary_destination);
4100		}
4101		return;
4102	}
4103	/*
4104	 * Now here we are going to recycle net_ack for a different
4105	 * use... HEADS UP.
4106	 */
4107	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4108		net->net_ack = 0;
4109	}
4110	if ((num_seg > 0) && marking_allowed) {
4111		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4112					   strike_enabled, biggest_tsn_newly_acked, accum_moved);
4113	}
4114
4115	/*********************************************/
4116	/* Here we perform PR-SCTP procedures        */
4117	/* (section 4.2)                             */
4118	/*********************************************/
4119	/* C1. update advancedPeerAckPoint */
4120	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4121		asoc->advanced_peer_ack_point = cum_ack;
4122	}
4123	/* C2. try to further move advancedPeerAckPoint ahead */
4124	if (asoc->peer_supports_prsctp) {
4125		struct sctp_tmit_chunk *lchk;
4126		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4127		/* C3. See if we need to send a Fwd-TSN */
4128		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4129				      MAX_TSN)) {
4130			/*
4131			 * ISSUE with ECN, see FWD-TSN processing for notes
4132			 * on issues that will occur when the ECN NONCE stuff
4133			 * is put into SCTP for cross checking.
4134			 */
4135			send_forward_tsn(stcb, asoc);
4136
4137			/* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn*/
4138			asoc->nonce_sum_check = 0;
4139			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4140			if (lchk) {
4141				/* Assure a timer is up */
4142				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4143						 stcb->sctp_ep, stcb, lchk->whoTo);
4144			}
4145		}
4146	}
4147	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4148		if (asoc->fast_retran_loss_recovery == 0) {
4149			/* out of a RFC2582 Fast recovery window? */
4150			if (net->net_ack > 0) {
4151				/*
4152				 * per section 7.2.3, are there
4153				 * any destinations that had a fast
4154				 * retransmit to them. If so what we
4155				 * need to do is adjust ssthresh and
4156				 * cwnd.
4157				 */
4158				struct sctp_tmit_chunk *lchk;
4159#ifdef  SCTP_HIGH_SPEED
4160				sctp_hs_cwnd_decrease(net);
4161#else
4162#ifdef SCTP_CWND_LOGGING
4163				int old_cwnd = net->cwnd;
4164#endif
4165				net->ssthresh = net->cwnd / 2;
4166				if (net->ssthresh < (net->mtu*2)) {
4167					net->ssthresh = 2 * net->mtu;
4168				}
4169				net->cwnd = net->ssthresh;
4170#ifdef SCTP_CWND_LOGGING
4171				sctp_log_cwnd(net, (net->cwnd-old_cwnd),
4172					      SCTP_CWND_LOG_FROM_FR);
4173#endif
4174#endif
4175
4176				lchk = TAILQ_FIRST(&asoc->send_queue);
4177
4178				net->partial_bytes_acked = 0;
4179				/* Turn on fast recovery window */
4180				asoc->fast_retran_loss_recovery = 1;
4181				if (lchk == NULL) {
4182					/* Mark end of the window */
4183					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
4184				} else {
4185					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
4186				}
4187
4188
4189				/* Disable Nonce Sum Checking and store the resync tsn*/
4190				asoc->nonce_sum_check = 0;
4191				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
4192
4193				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
4194						stcb->sctp_ep, stcb, net);
4195				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4196						 stcb->sctp_ep, stcb, net);
4197			}
4198		} else if (net->net_ack > 0) {
4199			/*
4200			 * Mark a peg that we WOULD have done a cwnd reduction
4201			 * but RFC2582 prevented this action.
4202			 */
4203			sctp_pegs[SCTP_FR_INAWINDOW]++;
4204		}
4205	}
4206
4207
4208	/******************************************************************
4209	 *  Here we do the stuff with ECN Nonce checking.
4210	 *  We basically check to see if the nonce sum flag was incorrect
4211	 *  or if resynchronization needs to be done. Also if we catch a
4212	 *  misbehaving receiver we give him the kick.
4213	 ******************************************************************/
4214
4215	if (asoc->ecn_nonce_allowed) {
4216		if (asoc->nonce_sum_check) {
4217			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
4218				if (asoc->nonce_wait_for_ecne == 0) {
4219					struct sctp_tmit_chunk *lchk;
4220					lchk = TAILQ_FIRST(&asoc->send_queue);
4221					asoc->nonce_wait_for_ecne = 1;
4222					if (lchk) {
4223						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4224					} else {
4225						asoc->nonce_wait_tsn = asoc->sending_seq;
4226					}
4227				} else {
4228					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4229					   (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4230						/* Misbehaving peer. We need to react to this guy */
4231						printf("Mis-behaving peer detected\n");
4232						asoc->ecn_allowed = 0;
4233						asoc->ecn_nonce_allowed = 0;
4234					}
4235				}
4236			}
4237		} else {
4238			/* See if Resynchronization Possible */
4239			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4240				asoc->nonce_sum_check = 1;
4241				/* now we must calculate what the base
4242				 * is. We do this based on two things, we know
4243				 * the total's for all the segments gap-acked
4244				 * in the SACK, its stored in ecn_seg_sums.
4245				 * We also know the SACK's nonce sum, its
4246				 * in nonce_sum_flag. So we can build a truth
4247				 * table to back-calculate the new value of asoc->nonce_sum_expect_base:
4248                                 *
4249                                 *   SACK-flag-Value         Seg-Sums              Base
4250				 *         0                    0                   0
4251				 *         1                    0                   1
4252				 *         0                    1                   1
4253				 *         1                    1                   0
4254				 */
4255				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4256			}
4257		}
4258	}
4259	/* Now are we exiting loss recovery ? */
4260	if (will_exit_fast_recovery) {
4261		/* Ok, we must exit fast recovery */
4262		asoc->fast_retran_loss_recovery = 0;
4263	}
4264	if ((asoc->sat_t3_loss_recovery) &&
4265	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
4266				MAX_TSN) ||
4267	      (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
4268		/* end satellite t3 loss recovery */
4269		asoc->sat_t3_loss_recovery = 0;
4270	}
4271	/* Adjust and set the new rwnd value */
4272#ifdef SCTP_LOG_RWND
4273	sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4274			  asoc->peers_rwnd,  asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
4275#endif
4276
4277	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4278					    (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4279	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4280		/* SWS sender side engages */
4281		asoc->peers_rwnd = 0;
4282	}
4283	/*
4284	 * Now we must setup so we have a timer up for anyone with
4285	 * outstanding data.
4286	 */
4287	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4288		struct sctp_tmit_chunk *chk;
4289		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4290			if (chk->whoTo == net &&
4291			    (chk->sent < SCTP_DATAGRAM_ACKED ||
4292			     chk->sent == SCTP_FORWARD_TSN_SKIP)) {
4293				/*
4294				 * Not ack'ed and still outstanding to this
4295				 * destination or marked and must be
4296				 * sacked after fwd-tsn sent.
4297				 */
4298				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4299						 stcb->sctp_ep, stcb, net);
4300				break;
4301			}
4302		}
4303	}
4304}
4305
4306void
4307sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
4308    struct sctp_nets *netp, int *abort_flag)
4309{
4310	/* Mutate a shutdown into a SACK */
4311	struct sctp_sack_chunk sack;
4312
4313	/* Copy cum-ack */
4314	sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack;
4315	/* Arrange so a_rwnd does NOT change */
4316	sack.ch.chunk_type = SCTP_SELECTIVE_ACK;
4317	sack.ch.chunk_flags = 0;
4318	sack.ch.chunk_length = ntohs(sizeof(struct sctp_sack_chunk));
4319	sack.sack.a_rwnd =
4320	    htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight);
4321	/*
4322	 * no gaps in this one. This may cause a temporal view to reneging,
4323	 * but hopefully the second chunk is a true SACK in the packet and
4324	 * will correct this view. One will come soon after no matter what
4325	 * to fix this.
4326	 */
4327	sack.sack.num_gap_ack_blks = 0;
4328	sack.sack.num_dup_tsns = 0;
4329	/* Now call the SACK processor */
4330	sctp_handle_sack(&sack, stcb, netp, abort_flag);
4331}
4332
4333static void
4334sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4335    struct sctp_stream_in *strmin)
4336{
4337	struct sctp_tmit_chunk *chk, *nchk;
4338	struct sctp_association *asoc;
4339	int tt;
4340
4341	asoc = &stcb->asoc;
4342	tt = strmin->last_sequence_delivered;
4343	/*
4344	 * First deliver anything prior to and including the stream no that
4345	 * came in
4346	 */
4347	chk = TAILQ_FIRST(&strmin->inqueue);
4348	while (chk) {
4349		nchk = TAILQ_NEXT(chk, sctp_next);
4350		if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) ||
4351		    (tt == chk->rec.data.stream_seq)) {
4352			/* this is deliverable now */
4353			TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4354			/* subtract pending on streams */
4355			asoc->size_on_all_streams -= chk->send_size;
4356			asoc->cnt_on_all_streams--;
4357			/* deliver it to at least the delivery-q */
4358			sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4359		} else {
4360			/* no more delivery now. */
4361			break;
4362		}
4363		chk = nchk;
4364	}
4365	/*
4366	 * now we must deliver things in queue the normal way  if any
4367	 * are now ready.
4368	 */
4369	tt = strmin->last_sequence_delivered + 1;
4370	chk = TAILQ_FIRST(&strmin->inqueue);
4371	while (chk) {
4372		nchk = TAILQ_NEXT(chk, sctp_next);
4373		if (tt == chk->rec.data.stream_seq) {
4374			/* this is deliverable now */
4375			TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4376			/* subtract pending on streams */
4377			asoc->size_on_all_streams -= chk->send_size;
4378			asoc->cnt_on_all_streams--;
4379			/* deliver it to at least the delivery-q */
4380			strmin->last_sequence_delivered =
4381			    chk->rec.data.stream_seq;
4382			sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4383			tt = strmin->last_sequence_delivered + 1;
4384		} else {
4385			break;
4386		}
4387		chk = nchk;
4388	}
4389
4390}
4391
4392void
4393sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4394    struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4395{
4396	/*
4397	 * ISSUES that MUST be fixed for ECN! When we are the
4398	 * sender of the forward TSN, when the SACK comes back
4399	 * that acknowledges the FWD-TSN we must reset the
4400	 * NONCE sum to match correctly. This will get quite
4401	 * tricky since we may have sent more data interveneing and
4402	 * must carefully account for what the SACK says on the
4403	 * nonce and any gaps that are reported. This work
4404	 * will NOT be done here, but I note it here since
4405	 * it is really related to PR-SCTP and FWD-TSN's
4406	 */
4407
4408	/* The pr-sctp fwd tsn */
4409	/*
4410	 * here we will perform all the data receiver side steps for
4411	 * processing FwdTSN, as required in by pr-sctp draft:
4412	 *
4413	 * Assume we get FwdTSN(x):
4414	 *
4415	 * 1) update local cumTSN to x
4416	 * 2) try to further advance cumTSN to x + others we have
4417	 * 3) examine and update re-ordering queue on pr-in-streams
4418	 * 4) clean up re-assembly queue
4419	 * 5) Send a sack to report where we are.
4420	 */
4421	struct sctp_strseq *stseq;
4422	struct sctp_association *asoc;
4423	u_int32_t new_cum_tsn, gap, back_out_htsn;
4424	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
4425	struct sctp_stream_in *strm;
4426	struct sctp_tmit_chunk *chk, *at;
4427
4428	cumack_set_flag = 0;
4429	asoc = &stcb->asoc;
4430	cnt_gone = 0;
4431	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
4432#ifdef SCTP_DEBUG
4433		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4434			printf("Bad size too small/big fwd-tsn\n");
4435		}
4436#endif
4437		return;
4438	}
4439	m_size = (stcb->asoc.mapping_array_size << 3);
4440	/*************************************************************/
4441	/* 1. Here we update local cumTSN and shift the bitmap array */
4442	/*************************************************************/
4443	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
4444
4445	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
4446	    asoc->cumulative_tsn == new_cum_tsn) {
4447		/* Already got there ... */
4448		return;
4449	}
4450
4451	back_out_htsn = asoc->highest_tsn_inside_map;
4452	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
4453	    MAX_TSN)) {
4454		asoc->highest_tsn_inside_map = new_cum_tsn;
4455#ifdef SCTP_MAP_LOGGING
4456			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4457#endif
4458	}
4459	/*
4460	 * now we know the new TSN is more advanced, let's find the
4461	 * actual gap
4462	 */
4463	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
4464			       MAX_TSN)) ||
4465	     (new_cum_tsn == asoc->mapping_array_base_tsn)) {
4466		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
4467	} else {
4468		/* try to prevent underflow here */
4469		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4470	}
4471
4472	if (gap >= m_size) {
4473		asoc->highest_tsn_inside_map = back_out_htsn;
4474		if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) {
4475			/*
4476			 * out of range (of single byte chunks in the rwnd I
4477			 * give out)
4478			 * too questionable. better to drop it silently
4479			 */
4480			return;
4481		}
4482		if (asoc->highest_tsn_inside_map >
4483		    asoc->mapping_array_base_tsn) {
4484			gap = asoc->highest_tsn_inside_map -
4485			    asoc->mapping_array_base_tsn;
4486		} else {
4487			gap = asoc->highest_tsn_inside_map +
4488			    (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4489		}
4490		cumack_set_flag = 1;
4491	}
4492	for (i = 0; i <= gap; i++) {
4493		SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
4494	}
4495	/*
4496	 * Now after marking all, slide thing forward but no
4497	 * sack please.
4498	 */
4499	sctp_sack_check(stcb, 0, 0, abort_flag);
4500	if (*abort_flag)
4501		return;
4502
4503	if (cumack_set_flag) {
4504		/*
4505		 * fwd-tsn went outside my gap array - not a
4506		 * common occurrence. Do the same thing we
4507		 * do when a cookie-echo arrives.
4508		 */
4509		asoc->highest_tsn_inside_map =  new_cum_tsn - 1;
4510		asoc->mapping_array_base_tsn = new_cum_tsn;
4511		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
4512#ifdef SCTP_MAP_LOGGING
4513		sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4514#endif
4515		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
4516	}
4517	/*************************************************************/
4518	/* 2. Clear up re-assembly queue                             */
4519	/*************************************************************/
4520
4521	/*
4522	 * First service it if pd-api is up, just in case we can
4523	 * progress it forward
4524	 */
4525	if (asoc->fragmented_delivery_inprogress) {
4526		sctp_service_reassembly(stcb, asoc, 0);
4527	}
4528	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
4529		/* For each one on here see if we need to toss it */
4530		/*
4531		 * For now large messages held on the reasmqueue that are
4532		 * complete will be tossed too. We could in theory do more
4533		 * work to spin through and stop after dumping one msg
4534		 * aka seeing the start of a new msg at the head, and call
4535		 * the delivery function... to see if it can be delivered...
4536		 * But for now we just dump everything on the queue.
4537		 */
4538		chk = TAILQ_FIRST(&asoc->reasmqueue);
4539		while (chk) {
4540			at = TAILQ_NEXT(chk, sctp_next);
4541			if (compare_with_wrap(asoc->cumulative_tsn,
4542			    chk->rec.data.TSN_seq, MAX_TSN) ||
4543			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
4544				/* It needs to be tossed */
4545				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4546				if (compare_with_wrap(chk->rec.data.TSN_seq,
4547				    asoc->tsn_last_delivered, MAX_TSN)) {
4548					asoc->tsn_last_delivered =
4549					    chk->rec.data.TSN_seq;
4550					asoc->str_of_pdapi =
4551					    chk->rec.data.stream_number;
4552					asoc->ssn_of_pdapi =
4553					    chk->rec.data.stream_seq;
4554					asoc->fragment_flags =
4555					    chk->rec.data.rcv_flags;
4556				}
4557				asoc->size_on_reasm_queue -= chk->send_size;
4558				asoc->cnt_on_reasm_queue--;
4559				cnt_gone++;
4560
4561				/* Clear up any stream problem */
4562				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
4563				    SCTP_DATA_UNORDERED &&
4564				    (compare_with_wrap(chk->rec.data.stream_seq,
4565				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
4566				    MAX_SEQ))) {
4567					/*
4568					 * We must dump forward this streams
4569					 * sequence number if the chunk is not
4570					 * unordered that is being skipped.
4571					 * There is a chance that if the peer
4572					 * does not include the last fragment
4573					 * in its FWD-TSN we WILL have a problem
4574					 * here since you would have a partial
4575					 * chunk in queue that may not be
4576					 * deliverable.
4577					 * Also if a Partial delivery API as
4578					 * started the user may get a partial
4579					 * chunk. The next read returning a new
4580					 * chunk... really ugly but I see no way
4581					 * around it! Maybe a notify??
4582					 */
4583					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
4584					    chk->rec.data.stream_seq;
4585				}
4586				if (chk->data) {
4587					sctp_m_freem(chk->data);
4588					chk->data = NULL;
4589				}
4590				sctp_free_remote_addr(chk->whoTo);
4591				SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4592				sctppcbinfo.ipi_count_chunk--;
4593				if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4594					panic("Chunk count is negative");
4595				}
4596				sctppcbinfo.ipi_gencnt_chunk++;
4597			} else {
4598				/*
4599				 * Ok we have gone beyond the end of the
4600				 * fwd-tsn's mark. Some checks...
4601				 */
4602				if ((asoc->fragmented_delivery_inprogress) &&
4603				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4604					/* Special case PD-API is up and what we fwd-tsn'
4605					 * over includes one that had the LAST_FRAG. We
4606					 * no longer need to do the PD-API.
4607					 */
4608					asoc->fragmented_delivery_inprogress = 0;
4609					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4610					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4611
4612				}
4613				break;
4614			}
4615			chk = at;
4616		}
4617	}
4618	if (asoc->fragmented_delivery_inprogress) {
4619		/*
4620		 * Ok we removed cnt_gone chunks in the PD-API queue that
4621		 * were being delivered. So now we must turn off the
4622		 * flag.
4623		 */
4624		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4625		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4626		asoc->fragmented_delivery_inprogress = 0;
4627	}
4628	/*************************************************************/
4629	/* 3. Update the PR-stream re-ordering queues                */
4630	/*************************************************************/
4631	stseq = (struct sctp_strseq *)((vaddr_t)fwd + sizeof(*fwd));
4632	fwd_sz -= sizeof(*fwd);
4633	{
4634		/* New method. */
4635		int num_str;
4636		num_str = fwd_sz/sizeof(struct sctp_strseq);
4637#ifdef SCTP_DEBUG
4638		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4639			printf("Using NEW method, %d strseq's reported in FWD-TSN\n",
4640			    num_str);
4641		}
4642#endif
4643		for (i = 0; i < num_str; i++) {
4644			u_int16_t st;
4645#if 0
4646			unsigned char *xx;
4647			/* Convert */
4648			xx = (unsigned char *)&stseq[i];
4649#endif
4650			st = ntohs(stseq[i].stream);
4651			stseq[i].stream = st;
4652			st = ntohs(stseq[i].sequence);
4653			stseq[i].sequence = st;
4654			/* now process */
4655			if (stseq[i].stream > asoc->streamincnt) {
4656#ifdef SCTP_DEBUG
4657				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4658					printf("Bogus stream number %d "
4659					    "streamincnt is %d\n",
4660					    stseq[i].stream, asoc->streamincnt);
4661				}
4662#endif
4663				/*
4664				 * It is arguable if we should continue. Since
4665				 * the peer sent bogus stream info we may be in
4666				 * deep trouble..
4667 				 * a return may be a better choice?
4668 				 */
4669 				continue;
4670 			}
4671			strm = &asoc->strmin[stseq[i].stream];
4672			if (compare_with_wrap(stseq[i].sequence,
4673			    strm->last_sequence_delivered, MAX_SEQ)) {
4674				/* Update the sequence number */
4675				strm->last_sequence_delivered =
4676				    stseq[i].sequence;
4677			}
4678			/* now kick the stream the new way */
4679			sctp_kick_prsctp_reorder_queue(stcb, strm);
4680		}
4681	}
4682}
4683