/* sctp_input.c revision 218129 */
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 218129 2011-01-31 11:50:11Z rrs $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_crc32.h>
50#include <netinet/udp.h>
51
52
53
54static void
55sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
56{
57	struct sctp_nets *net;
58
59	/*
60	 * This now not only stops all cookie timers it also stops any INIT
61	 * timers as well. This will make sure that the timers are stopped
62	 * in all collision cases.
63	 */
64	SCTP_TCB_LOCK_ASSERT(stcb);
65	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
66		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
67			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
68			    stcb->sctp_ep,
69			    stcb,
70			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
71		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
72			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
73			    stcb->sctp_ep,
74			    stcb,
75			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
76		}
77	}
78}
79
/*
 * INIT handler.
 *
 * Validates an incoming INIT chunk and, when acceptable, replies with an
 * INIT-ACK carrying a state cookie.  On any validation failure an ABORT
 * is sent instead.  When stcb is non-NULL and an abort was issued,
 * *abort_no_unlock is set to 1 so the caller knows the TCB lock was
 * already handled by the abort path.
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	if (stcb == NULL) {
		/* No association yet: hold the INP read lock until outnow */
		SCTP_INP_RLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			goto outnow;
		}
	}
	op_err = NULL;
	init = &cp->init;
	/* First are we accepting? */
	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init: Abort, so_qlimit:%d\n",
		    inp->sctp_socket->so_qlimit);
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* Bound for parameter walking: end of this INIT chunk in the mbuf */
	init_limit = offset + ntohs(cp->ch.chunk_length);
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    init_limit)) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* send an INIT-ACK w/cookie */
	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
187
/*
 * Note: the peer "INIT/INIT-ACK" chunk processing lives in
 * sctp_process_init() below; it returns a value < 0 on error.
 */
191
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb)
{
	int unsent_data = 0;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/*
	 * This function returns the number of streams that have true unsent
	 * data on them. Note that as it looks through it will clean up any
	 * places that have old data that has been sent but left at top of
	 * stream queue.
	 *
	 * Because of the break below, the return value is effectively 0
	 * or 1: the scan stops at the first stream with real unsent data.
	 */
	asoc = &stcb->asoc;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing differed cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					/* Diagnostic only: bookkeeping is inconsistent */
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				/* drop the net ref and data before freeing sp */
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp);
			} else {
				/* real unsent data found; one is enough */
				unsent_data++;
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}
252
/*
 * Apply the parameters carried in a peer INIT (or the INIT portion of an
 * INIT-ACK) to the association: peer vtag, rwnd, initial-TSN bookkeeping,
 * and the inbound/outbound stream counts.  Outbound streams above the
 * peer's advertised inbound count are abandoned together with any data
 * queued on them.  Returns 0 on success, -1 when the inbound stream
 * array cannot be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* drop already-queued chunks destined for abandoned streams */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* tell the app this data will never go out */
					sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
					    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk);
				/* sa_ignore FREED_MEMORY */
			}
		}
		if (asoc->strmout) {
			/* purge per-stream pending queues of abandoned streams */
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;

	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl, *nctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* inbound stream count is capped at MAX_SCTP_STREAMS */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
398
/*
 * INIT-ACK message processing/consumption returns value < 0 on error.
 *
 * Verifies the INIT-ACK parameters, applies them via sctp_process_init(),
 * loads the peer's addresses, stops the INIT timer, and queues a
 * COOKIE-ECHO.  On fatal errors an abort is sent and *abort_no_unlock is
 * set to 1.
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	/* end of this INIT-ACK chunk in the mbuf chain */
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* a valid INIT-ACK resets the error counters */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}
523
/*
 * Process a HEARTBEAT-ACK: confirm the echoed address when the random
 * values match, clear the error counters, possibly restore
 * reachability/primary status, and feed the echoed timestamp into the
 * RTO calculation.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	/* both views alias 'store'; exactly one is filled in below */
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;

	memset(&store, 0, sizeof(store));
	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
		sin->sin_family = cp->heartbeat.hb_info.addr_family;
		sin->sin_len = cp->heartbeat.hb_info.addr_len;
		sin->sin_port = stcb->rport;
		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin->sin_addr));
	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
		sin6->sin6_port = stcb->rport;
		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown family/length combination: ignore the chunk */
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	/* r_net is known non-NULL here; the extra test below is redundant */
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If the its a HB and it's random value is correct when can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficent if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	/* echoed send-time, used for the RTO sample below */
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
		/* now was it the primary? if so restore */
		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
		}
	}
	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
	 * set the destination to active state and set the cwnd to one or
	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
	 * timer is running, for the destination, stop the timer because a
	 * PF-heartbeat was received.
	 *
	 * NOTE(review): this block operates on 'net' (the net the packet
	 * arrived on) while the rest of the function uses 'r_net' (the net
	 * matching the echoed address) -- confirm this is intentional.
	 */
	if ((stcb->asoc.sctp_cmt_on_off > 0) &&
	    (stcb->asoc.sctp_cmt_pf > 0) &&
	    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		}
		net->dest_state &= ~SCTP_ADDR_PF;
		net->cwnd = net->mtu * stcb->asoc.sctp_cmt_pf;
		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
		    net, net->cwnd);
	}
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				    stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}
654
/*
 * NAT "colliding state" handling: instead of tearing the association
 * down, restart the handshake with a freshly selected verification tag
 * (which requires re-hashing the TCB into the vtag hash).
 */
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	struct sctpasochead *head;

	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * treat like a case where the cookie expired i.e.: - dump
		 * current cookie. - generate a new vtag. - resend init.
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	/* any other state: proceed with normal abort processing */
	return (0);
}
700
701static int
702sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
703    struct sctp_nets *net)
704{
705	/*
706	 * return 0 means we want you to proceed with the abort non-zero
707	 * means no abort processing
708	 */
709	if (stcb->asoc.peer_supports_auth == 0) {
710		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
711		return (0);
712	}
713	sctp_asconf_send_nat_state_update(stcb, net);
714	return (1);
715}
716
717
/*
 * Process an incoming ABORT chunk.  The two NAT error causes may cancel
 * the abort (the handshake is restarted instead); otherwise the
 * association is torn down and the user notified.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(cp->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_abort_chunk *cpnext;
		struct sctp_missing_nat_state *natc;
		uint16_t cause;

		/* first error cause sits directly after the chunk header */
		cpnext = cp;
		cpnext++;
		natc = (struct sctp_missing_nat_state *)cpnext;
		cause = ntohs(natc->cause);
		if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    cp->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				return;
			}
		} else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    cp->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
#if defined(SCTP_PANIC_ON_ABORT)
	printf("stcb:%p state:%d rport:%d net:%p\n",
	    stcb, stcb->asoc.state, stcb->rport, net);
	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		panic("Received an ABORT");
	} else {
		printf("No panic its in state %x closed\n", stcb->asoc.state);
	}
#endif
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: take a refcount, drop the TCB lock, acquire the
	 * socket lock, then re-take the TCB lock before freeing.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/*
	 * NOTE(review): SCTP_LOC_6 is also used for the timer stop above;
	 * presumably this should be a distinct location tag -- confirm.
	 */
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
798
/*
 * Process an incoming SHUTDOWN chunk: absorb the cumulative ack it
 * carries, terminate any in-progress partial delivery, move to
 * SHUTDOWN-RECEIVED, and -- once no data remains queued -- answer with a
 * SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.  *abort_flag is set by
 * sctp_update_acked() when the carried ack requires an abort.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* SHUTDOWN before the association is up: ignore it */
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		/* process the cumulative TSN ack carried in the SHUTDOWN */
		sctp_update_acked(stcb, cp, net, abort_flag);
		if (*abort_flag) {
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* wake any reader blocked on the partial record */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}
904
905static void
906sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
907    struct sctp_tcb *stcb,
908    struct sctp_nets *net)
909{
910	struct sctp_association *asoc;
911
912#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
913	struct socket *so;
914
915	so = SCTP_INP_SO(stcb->sctp_ep);
916#endif
917	SCTPDBG(SCTP_DEBUG_INPUT2,
918	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
919	if (stcb == NULL)
920		return;
921
922	asoc = &stcb->asoc;
923	/* process according to association state */
924	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
925	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
926		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
927		sctp_send_shutdown_complete(stcb, net, 1);
928		SCTP_TCB_UNLOCK(stcb);
929		return;
930	}
931	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
932	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
933		/* unexpected SHUTDOWN-ACK... so ignore... */
934		SCTP_TCB_UNLOCK(stcb);
935		return;
936	}
937	if (asoc->control_pdapi) {
938		/*
939		 * With a normal shutdown we assume the end of last record.
940		 */
941		SCTP_INP_READ_LOCK(stcb->sctp_ep);
942		asoc->control_pdapi->end_added = 1;
943		asoc->control_pdapi->pdapi_aborted = 1;
944		asoc->control_pdapi = NULL;
945		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
946#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
947		atomic_add_int(&stcb->asoc.refcnt, 1);
948		SCTP_TCB_UNLOCK(stcb);
949		SCTP_SOCKET_LOCK(so, 1);
950		SCTP_TCB_LOCK(stcb);
951		atomic_subtract_int(&stcb->asoc.refcnt, 1);
952		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
953			/* assoc was freed while we were unlocked */
954			SCTP_SOCKET_UNLOCK(so, 1);
955			return;
956		}
957#endif
958		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
959#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
960		SCTP_SOCKET_UNLOCK(so, 1);
961#endif
962	}
963	/* are the queues empty? */
964	if (!TAILQ_EMPTY(&asoc->send_queue) ||
965	    !TAILQ_EMPTY(&asoc->sent_queue) ||
966	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
967		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
968	}
969	/* stop the timer */
970	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
971	/* send SHUTDOWN-COMPLETE */
972	sctp_send_shutdown_complete(stcb, net, 0);
973	/* notify upper layer protocol */
974	if (stcb->sctp_socket) {
975		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
976		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
977		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
978			/* Set the connected flag to disconnected */
979			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
980		}
981	}
982	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
983	/* free the TCB but first save off the ep */
984#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
985	atomic_add_int(&stcb->asoc.refcnt, 1);
986	SCTP_TCB_UNLOCK(stcb);
987	SCTP_SOCKET_LOCK(so, 1);
988	SCTP_TCB_LOCK(stcb);
989	atomic_subtract_int(&stcb->asoc.refcnt, 1);
990#endif
991	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
992	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
993#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
994	SCTP_SOCKET_UNLOCK(so, 1);
995#endif
996}
997
998/*
999 * Skip past the param header and then we will find the chunk that caused the
1000 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
1001 * our peer must be broken.
1002 */
1003static void
1004sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1005    struct sctp_nets *net)
1006{
1007	struct sctp_chunkhdr *chk;
1008
1009	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1010	switch (chk->chunk_type) {
1011	case SCTP_ASCONF_ACK:
1012	case SCTP_ASCONF:
1013		sctp_asconf_cleanup(stcb, net);
1014		break;
1015	case SCTP_FORWARD_CUM_TSN:
1016		stcb->asoc.peer_supports_prsctp = 0;
1017		break;
1018	default:
1019		SCTPDBG(SCTP_DEBUG_INPUT2,
1020		    "Peer does not support chunk type %d(%x)??\n",
1021		    chk->chunk_type, (uint32_t) chk->chunk_type);
1022		break;
1023	}
1024}
1025
1026/*
1027 * Skip past the param header and then we will find the param that caused the
1028 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1029 * these will turn of specific features.
1030 */
1031static void
1032sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1033{
1034	struct sctp_paramhdr *pbad;
1035
1036	pbad = phdr + 1;
1037	switch (ntohs(pbad->param_type)) {
1038		/* pr-sctp draft */
1039	case SCTP_PRSCTP_SUPPORTED:
1040		stcb->asoc.peer_supports_prsctp = 0;
1041		break;
1042	case SCTP_SUPPORTED_CHUNK_EXT:
1043		break;
1044		/* draft-ietf-tsvwg-addip-sctp */
1045	case SCTP_HAS_NAT_SUPPORT:
1046		stcb->asoc.peer_supports_nat = 0;
1047		break;
1048	case SCTP_ADD_IP_ADDRESS:
1049	case SCTP_DEL_IP_ADDRESS:
1050	case SCTP_SET_PRIM_ADDR:
1051		stcb->asoc.peer_supports_asconf = 0;
1052		break;
1053	case SCTP_SUCCESS_REPORT:
1054	case SCTP_ERROR_CAUSE_IND:
1055		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1056		SCTPDBG(SCTP_DEBUG_INPUT2,
1057		    "Turning off ASCONF to this strange peer\n");
1058		stcb->asoc.peer_supports_asconf = 0;
1059		break;
1060	default:
1061		SCTPDBG(SCTP_DEBUG_INPUT2,
1062		    "Peer does not support param type %d(%x)??\n",
1063		    pbad->param_type, (uint32_t) pbad->param_type);
1064		break;
1065	}
1066}
1067
1068static int
1069sctp_handle_error(struct sctp_chunkhdr *ch,
1070    struct sctp_tcb *stcb, struct sctp_nets *net)
1071{
1072	int chklen;
1073	struct sctp_paramhdr *phdr;
1074	uint16_t error_type;
1075	uint16_t error_len;
1076	struct sctp_association *asoc;
1077	int adjust;
1078
1079#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1080	struct socket *so;
1081
1082#endif
1083
1084	/* parse through all of the errors and process */
1085	asoc = &stcb->asoc;
1086	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1087	    sizeof(struct sctp_chunkhdr));
1088	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
1089	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1090		/* Process an Error Cause */
1091		error_type = ntohs(phdr->param_type);
1092		error_len = ntohs(phdr->param_length);
1093		if ((error_len > chklen) || (error_len == 0)) {
1094			/* invalid param length for this param */
1095			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1096			    chklen, error_len);
1097			return (0);
1098		}
1099		switch (error_type) {
1100		case SCTP_CAUSE_INVALID_STREAM:
1101		case SCTP_CAUSE_MISSING_PARAM:
1102		case SCTP_CAUSE_INVALID_PARAM:
1103		case SCTP_CAUSE_NO_USER_DATA:
1104			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1105			    error_type);
1106			break;
1107		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1108			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1109			    ch->chunk_flags);
1110			if (sctp_handle_nat_colliding_state(stcb)) {
1111				return (0);
1112			}
1113			break;
1114		case SCTP_CAUSE_NAT_MISSING_STATE:
1115			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1116			    ch->chunk_flags);
1117			if (sctp_handle_nat_missing_state(stcb, net)) {
1118				return (0);
1119			}
1120			break;
1121		case SCTP_CAUSE_STALE_COOKIE:
1122			/*
1123			 * We only act if we have echoed a cookie and are
1124			 * waiting.
1125			 */
1126			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1127				int *p;
1128
1129				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1130				/* Save the time doubled */
1131				asoc->cookie_preserve_req = ntohl(*p) << 1;
1132				asoc->stale_cookie_count++;
1133				if (asoc->stale_cookie_count >
1134				    asoc->max_init_times) {
1135					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
1136					/* now free the asoc */
1137#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1138					so = SCTP_INP_SO(stcb->sctp_ep);
1139					atomic_add_int(&stcb->asoc.refcnt, 1);
1140					SCTP_TCB_UNLOCK(stcb);
1141					SCTP_SOCKET_LOCK(so, 1);
1142					SCTP_TCB_LOCK(stcb);
1143					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1144#endif
1145					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1146					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1147#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1148					SCTP_SOCKET_UNLOCK(so, 1);
1149#endif
1150					return (-1);
1151				}
1152				/* blast back to INIT state */
1153				sctp_toss_old_cookies(stcb, &stcb->asoc);
1154				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1155				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1156				sctp_stop_all_cookie_timers(stcb);
1157				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1158			}
1159			break;
1160		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1161			/*
1162			 * Nothing we can do here, we don't do hostname
1163			 * addresses so if the peer does not like my IPv6
1164			 * (or IPv4 for that matter) it does not matter. If
1165			 * they don't support that type of address, they can
1166			 * NOT possibly get that packet type... i.e. with no
1167			 * IPv6 you can't recieve a IPv6 packet. so we can
1168			 * safely ignore this one. If we ever added support
1169			 * for HOSTNAME Addresses, then we would need to do
1170			 * something here.
1171			 */
1172			break;
1173		case SCTP_CAUSE_UNRECOG_CHUNK:
1174			sctp_process_unrecog_chunk(stcb, phdr, net);
1175			break;
1176		case SCTP_CAUSE_UNRECOG_PARAM:
1177			sctp_process_unrecog_param(stcb, phdr);
1178			break;
1179		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1180			/*
1181			 * We ignore this since the timer will drive out a
1182			 * new cookie anyway and there timer will drive us
1183			 * to send a SHUTDOWN_COMPLETE. We can't send one
1184			 * here since we don't have their tag.
1185			 */
1186			break;
1187		case SCTP_CAUSE_DELETING_LAST_ADDR:
1188		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1189		case SCTP_CAUSE_DELETING_SRC_ADDR:
1190			/*
1191			 * We should NOT get these here, but in a
1192			 * ASCONF-ACK.
1193			 */
1194			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1195			    error_type);
1196			break;
1197		case SCTP_CAUSE_OUT_OF_RESC:
1198			/*
1199			 * And what, pray tell do we do with the fact that
1200			 * the peer is out of resources? Not really sure we
1201			 * could do anything but abort. I suspect this
1202			 * should have came WITH an abort instead of in a
1203			 * OP-ERROR.
1204			 */
1205			break;
1206		default:
1207			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1208			    error_type);
1209			break;
1210		}
1211		adjust = SCTP_SIZE32(error_len);
1212		chklen -= adjust;
1213		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1214	}
1215	return (0);
1216}
1217
1218static int
1219sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1220    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1221    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1222{
1223	struct sctp_init_ack *init_ack;
1224	struct mbuf *op_err;
1225
1226	SCTPDBG(SCTP_DEBUG_INPUT2,
1227	    "sctp_handle_init_ack: handling INIT-ACK\n");
1228
1229	if (stcb == NULL) {
1230		SCTPDBG(SCTP_DEBUG_INPUT2,
1231		    "sctp_handle_init_ack: TCB is null\n");
1232		return (-1);
1233	}
1234	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1235		/* Invalid length */
1236		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1237		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1238		    op_err, 0, net->port);
1239		*abort_no_unlock = 1;
1240		return (-1);
1241	}
1242	init_ack = &cp->init;
1243	/* validate parameters */
1244	if (init_ack->initiate_tag == 0) {
1245		/* protocol error... send an abort */
1246		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1247		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1248		    op_err, 0, net->port);
1249		*abort_no_unlock = 1;
1250		return (-1);
1251	}
1252	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1253		/* protocol error... send an abort */
1254		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1255		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1256		    op_err, 0, net->port);
1257		*abort_no_unlock = 1;
1258		return (-1);
1259	}
1260	if (init_ack->num_inbound_streams == 0) {
1261		/* protocol error... send an abort */
1262		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1263		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1264		    op_err, 0, net->port);
1265		*abort_no_unlock = 1;
1266		return (-1);
1267	}
1268	if (init_ack->num_outbound_streams == 0) {
1269		/* protocol error... send an abort */
1270		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1271		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1272		    op_err, 0, net->port);
1273		*abort_no_unlock = 1;
1274		return (-1);
1275	}
1276	/* process according to association state... */
1277	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1278	case SCTP_STATE_COOKIE_WAIT:
1279		/* this is the expected state for this chunk */
1280		/* process the INIT-ACK parameters */
1281		if (stcb->asoc.primary_destination->dest_state &
1282		    SCTP_ADDR_UNCONFIRMED) {
1283			/*
1284			 * The primary is where we sent the INIT, we can
1285			 * always consider it confirmed when the INIT-ACK is
1286			 * returned. Do this before we load addresses
1287			 * though.
1288			 */
1289			stcb->asoc.primary_destination->dest_state &=
1290			    ~SCTP_ADDR_UNCONFIRMED;
1291			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1292			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1293		}
1294		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1295		    net, abort_no_unlock, vrf_id) < 0) {
1296			/* error in parsing parameters */
1297			return (-1);
1298		}
1299		/* update our state */
1300		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1301		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1302
1303		/* reset the RTO calc */
1304		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1305			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1306			    stcb->asoc.overall_error_count,
1307			    0,
1308			    SCTP_FROM_SCTP_INPUT,
1309			    __LINE__);
1310		}
1311		stcb->asoc.overall_error_count = 0;
1312		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1313		/*
1314		 * collapse the init timer back in case of a exponential
1315		 * backoff
1316		 */
1317		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1318		    stcb, net);
1319		/*
1320		 * the send at the end of the inbound data processing will
1321		 * cause the cookie to be sent
1322		 */
1323		break;
1324	case SCTP_STATE_SHUTDOWN_SENT:
1325		/* incorrect state... discard */
1326		break;
1327	case SCTP_STATE_COOKIE_ECHOED:
1328		/* incorrect state... discard */
1329		break;
1330	case SCTP_STATE_OPEN:
1331		/* incorrect state... discard */
1332		break;
1333	case SCTP_STATE_EMPTY:
1334	case SCTP_STATE_INUSE:
1335	default:
1336		/* incorrect state... discard */
1337		return (-1);
1338		break;
1339	}
1340	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1341	return (0);
1342}
1343
1344static struct sctp_tcb *
1345sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1346    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1347    struct sctp_inpcb *inp, struct sctp_nets **netp,
1348    struct sockaddr *init_src, int *notification,
1349    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1350    uint32_t vrf_id, uint16_t port);
1351
1352
1353/*
1354 * handle a state cookie for an existing association m: input packet mbuf
1355 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1356 * "split" mbuf and the cookie signature does not exist offset: offset into
1357 * mbuf to the cookie-echo chunk
1358 */
1359static struct sctp_tcb *
1360sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1361    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1362    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1363    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1364    uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
1365{
1366	struct sctp_association *asoc;
1367	struct sctp_init_chunk *init_cp, init_buf;
1368	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1369	struct sctp_nets *net;
1370	struct mbuf *op_err;
1371	struct sctp_paramhdr *ph;
1372	int chk_length;
1373	int init_offset, initack_offset, i;
1374	int retval;
1375	int spec_flag = 0;
1376	uint32_t how_indx;
1377
1378	net = *netp;
1379	/* I know that the TCB is non-NULL from the caller */
1380	asoc = &stcb->asoc;
1381	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1382		if (asoc->cookie_how[how_indx] == 0)
1383			break;
1384	}
1385	if (how_indx < sizeof(asoc->cookie_how)) {
1386		asoc->cookie_how[how_indx] = 1;
1387	}
1388	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1389		/* SHUTDOWN came in after sending INIT-ACK */
1390		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1391		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1392		    0, M_DONTWAIT, 1, MT_DATA);
1393		if (op_err == NULL) {
1394			/* FOOBAR */
1395			return (NULL);
1396		}
1397		/* Set the len */
1398		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1399		ph = mtod(op_err, struct sctp_paramhdr *);
1400		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1401		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1402		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1403		    vrf_id, net->port);
1404		if (how_indx < sizeof(asoc->cookie_how))
1405			asoc->cookie_how[how_indx] = 2;
1406		return (NULL);
1407	}
1408	/*
1409	 * find and validate the INIT chunk in the cookie (peer's info) the
1410	 * INIT should start after the cookie-echo header struct (chunk
1411	 * header, state cookie header struct)
1412	 */
1413	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1414
1415	init_cp = (struct sctp_init_chunk *)
1416	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1417	    (uint8_t *) & init_buf);
1418	if (init_cp == NULL) {
1419		/* could not pull a INIT chunk in cookie */
1420		return (NULL);
1421	}
1422	chk_length = ntohs(init_cp->ch.chunk_length);
1423	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1424		return (NULL);
1425	}
1426	/*
1427	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1428	 * INIT-ACK follows the INIT chunk
1429	 */
1430	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1431	initack_cp = (struct sctp_init_ack_chunk *)
1432	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1433	    (uint8_t *) & initack_buf);
1434	if (initack_cp == NULL) {
1435		/* could not pull INIT-ACK chunk in cookie */
1436		return (NULL);
1437	}
1438	chk_length = ntohs(initack_cp->ch.chunk_length);
1439	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1440		return (NULL);
1441	}
1442	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1443	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1444		/*
1445		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1446		 * to get into the OPEN state
1447		 */
1448		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1449			/*-
1450			 * Opps, this means that we somehow generated two vtag's
1451			 * the same. I.e. we did:
1452			 *  Us               Peer
1453			 *   <---INIT(tag=a)------
1454			 *   ----INIT-ACK(tag=t)-->
1455			 *   ----INIT(tag=t)------> *1
1456			 *   <---INIT-ACK(tag=a)---
1457                         *   <----CE(tag=t)------------- *2
1458			 *
1459			 * At point *1 we should be generating a different
1460			 * tag t'. Which means we would throw away the CE and send
1461			 * ours instead. Basically this is case C (throw away side).
1462			 */
1463			if (how_indx < sizeof(asoc->cookie_how))
1464				asoc->cookie_how[how_indx] = 17;
1465			return (NULL);
1466
1467		}
1468		switch SCTP_GET_STATE
1469			(asoc) {
1470		case SCTP_STATE_COOKIE_WAIT:
1471		case SCTP_STATE_COOKIE_ECHOED:
1472			/*
1473			 * INIT was sent but got a COOKIE_ECHO with the
1474			 * correct tags... just accept it...but we must
1475			 * process the init so that we can make sure we have
1476			 * the right seq no's.
1477			 */
1478			/* First we must process the INIT !! */
1479			retval = sctp_process_init(init_cp, stcb, net);
1480			if (retval < 0) {
1481				if (how_indx < sizeof(asoc->cookie_how))
1482					asoc->cookie_how[how_indx] = 3;
1483				return (NULL);
1484			}
1485			/* we have already processed the INIT so no problem */
1486			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1487			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1488			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1489			/* update current state */
1490			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1491				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1492			else
1493				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1494
1495			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1496			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1497				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1498				    stcb->sctp_ep, stcb, asoc->primary_destination);
1499			}
1500			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1501			sctp_stop_all_cookie_timers(stcb);
1502			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1503			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1504			    (inp->sctp_socket->so_qlimit == 0)
1505			    ) {
1506#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1507				struct socket *so;
1508
1509#endif
1510				/*
1511				 * Here is where collision would go if we
1512				 * did a connect() and instead got a
1513				 * init/init-ack/cookie done before the
1514				 * init-ack came back..
1515				 */
1516				stcb->sctp_ep->sctp_flags |=
1517				    SCTP_PCB_FLAGS_CONNECTED;
1518#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1519				so = SCTP_INP_SO(stcb->sctp_ep);
1520				atomic_add_int(&stcb->asoc.refcnt, 1);
1521				SCTP_TCB_UNLOCK(stcb);
1522				SCTP_SOCKET_LOCK(so, 1);
1523				SCTP_TCB_LOCK(stcb);
1524				atomic_add_int(&stcb->asoc.refcnt, -1);
1525				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1526					SCTP_SOCKET_UNLOCK(so, 1);
1527					return (NULL);
1528				}
1529#endif
1530				soisconnected(stcb->sctp_socket);
1531#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1532				SCTP_SOCKET_UNLOCK(so, 1);
1533#endif
1534			}
1535			/* notify upper layer */
1536			*notification = SCTP_NOTIFY_ASSOC_UP;
1537			/*
1538			 * since we did not send a HB make sure we don't
1539			 * double things
1540			 */
1541			net->hb_responded = 1;
1542			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1543			    &cookie->time_entered, sctp_align_unsafe_makecopy);
1544
1545			if (stcb->asoc.sctp_autoclose_ticks &&
1546			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1547				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1548				    inp, stcb, NULL);
1549			}
1550			break;
1551		default:
1552			/*
1553			 * we're in the OPEN state (or beyond), so peer must
1554			 * have simply lost the COOKIE-ACK
1555			 */
1556			break;
1557			}	/* end switch */
1558		sctp_stop_all_cookie_timers(stcb);
1559		/*
1560		 * We ignore the return code here.. not sure if we should
1561		 * somehow abort.. but we do have an existing asoc. This
1562		 * really should not fail.
1563		 */
1564		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1565		    init_offset + sizeof(struct sctp_init_chunk),
1566		    initack_offset, sh, init_src)) {
1567			if (how_indx < sizeof(asoc->cookie_how))
1568				asoc->cookie_how[how_indx] = 4;
1569			return (NULL);
1570		}
1571		/* respond with a COOKIE-ACK */
1572		sctp_toss_old_cookies(stcb, asoc);
1573		sctp_send_cookie_ack(stcb);
1574		if (how_indx < sizeof(asoc->cookie_how))
1575			asoc->cookie_how[how_indx] = 5;
1576		return (stcb);
1577	}
1578	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1579	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1580	    cookie->tie_tag_my_vtag == 0 &&
1581	    cookie->tie_tag_peer_vtag == 0) {
1582		/*
1583		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1584		 */
1585		if (how_indx < sizeof(asoc->cookie_how))
1586			asoc->cookie_how[how_indx] = 6;
1587		return (NULL);
1588	}
1589	/*
1590	 * If nat support, and the below and stcb is established, send back
1591	 * a ABORT(colliding state) if we are established.
1592	 */
1593	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1594	    (asoc->peer_supports_nat) &&
1595	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1596	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1597	    (asoc->peer_vtag == 0)))) {
1598		/*
1599		 * Special case - Peer's support nat. We may have two init's
1600		 * that we gave out the same tag on since one was not
1601		 * established.. i.e. we get INIT from host-1 behind the nat
1602		 * and we respond tag-a, we get a INIT from host-2 behind
1603		 * the nat and we get tag-a again. Then we bring up host-1
1604		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1605		 * Now we have colliding state. We must send an abort here
1606		 * with colliding state indication.
1607		 */
1608		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1609		    0, M_DONTWAIT, 1, MT_DATA);
1610		if (op_err == NULL) {
1611			/* FOOBAR */
1612			return (NULL);
1613		}
1614		/* pre-reserve some space */
1615#ifdef INET6
1616		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1617#else
1618		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1619#endif
1620		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1621		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1622		/* Set the len */
1623		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1624		ph = mtod(op_err, struct sctp_paramhdr *);
1625		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
1626		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1627		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
1628		return (NULL);
1629	}
1630	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1631	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1632	    (asoc->peer_vtag == 0))) {
1633		/*
1634		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1635		 * should be ok, re-accept peer info
1636		 */
1637		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1638			/*
1639			 * Extension of case C. If we hit this, then the
1640			 * random number generator returned the same vtag
1641			 * when we first sent our INIT-ACK and when we later
1642			 * sent our INIT. The side with the seq numbers that
1643			 * are different will be the one that normnally
1644			 * would have hit case C. This in effect "extends"
1645			 * our vtags in this collision case to be 64 bits.
1646			 * The same collision could occur aka you get both
1647			 * vtag and seq number the same twice in a row.. but
1648			 * is much less likely. If it did happen then we
1649			 * would proceed through and bring up the assoc.. we
1650			 * may end up with the wrong stream setup however..
1651			 * which would be bad.. but there is no way to
1652			 * tell.. until we send on a stream that does not
1653			 * exist :-)
1654			 */
1655			if (how_indx < sizeof(asoc->cookie_how))
1656				asoc->cookie_how[how_indx] = 7;
1657
1658			return (NULL);
1659		}
1660		if (how_indx < sizeof(asoc->cookie_how))
1661			asoc->cookie_how[how_indx] = 8;
1662		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1663		sctp_stop_all_cookie_timers(stcb);
1664		/*
1665		 * since we did not send a HB make sure we don't double
1666		 * things
1667		 */
1668		net->hb_responded = 1;
1669		if (stcb->asoc.sctp_autoclose_ticks &&
1670		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1671			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1672			    NULL);
1673		}
1674		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1675		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1676
1677		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1678			/*
1679			 * Ok the peer probably discarded our data (if we
1680			 * echoed a cookie+data). So anything on the
1681			 * sent_queue should be marked for retransmit, we
1682			 * may not get something to kick us so it COULD
1683			 * still take a timeout to move these.. but it can't
1684			 * hurt to mark them.
1685			 */
1686			struct sctp_tmit_chunk *chk;
1687
1688			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1689				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1690					chk->sent = SCTP_DATAGRAM_RESEND;
1691					sctp_flight_size_decrease(chk);
1692					sctp_total_flight_decrease(stcb, chk);
1693					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1694					spec_flag++;
1695				}
1696			}
1697
1698		}
1699		/* process the INIT info (peer's info) */
1700		retval = sctp_process_init(init_cp, stcb, net);
1701		if (retval < 0) {
1702			if (how_indx < sizeof(asoc->cookie_how))
1703				asoc->cookie_how[how_indx] = 9;
1704			return (NULL);
1705		}
1706		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1707		    init_offset + sizeof(struct sctp_init_chunk),
1708		    initack_offset, sh, init_src)) {
1709			if (how_indx < sizeof(asoc->cookie_how))
1710				asoc->cookie_how[how_indx] = 10;
1711			return (NULL);
1712		}
1713		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1714		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1715			*notification = SCTP_NOTIFY_ASSOC_UP;
1716
1717			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1718			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1719			    (inp->sctp_socket->so_qlimit == 0)) {
1720#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1721				struct socket *so;
1722
1723#endif
1724				stcb->sctp_ep->sctp_flags |=
1725				    SCTP_PCB_FLAGS_CONNECTED;
1726#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1727				so = SCTP_INP_SO(stcb->sctp_ep);
1728				atomic_add_int(&stcb->asoc.refcnt, 1);
1729				SCTP_TCB_UNLOCK(stcb);
1730				SCTP_SOCKET_LOCK(so, 1);
1731				SCTP_TCB_LOCK(stcb);
1732				atomic_add_int(&stcb->asoc.refcnt, -1);
1733				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1734					SCTP_SOCKET_UNLOCK(so, 1);
1735					return (NULL);
1736				}
1737#endif
1738				soisconnected(stcb->sctp_socket);
1739#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1740				SCTP_SOCKET_UNLOCK(so, 1);
1741#endif
1742			}
1743			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1744				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1745			else
1746				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1747			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1748		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1749			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1750		} else {
1751			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1752		}
1753		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1754		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1755			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1756			    stcb->sctp_ep, stcb, asoc->primary_destination);
1757		}
1758		sctp_stop_all_cookie_timers(stcb);
1759		sctp_toss_old_cookies(stcb, asoc);
1760		sctp_send_cookie_ack(stcb);
1761		if (spec_flag) {
1762			/*
1763			 * only if we have retrans set do we do this. What
1764			 * this call does is get only the COOKIE-ACK out and
1765			 * then when we return the normal call to
1766			 * sctp_chunk_output will get the retrans out behind
1767			 * this.
1768			 */
1769			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1770		}
1771		if (how_indx < sizeof(asoc->cookie_how))
1772			asoc->cookie_how[how_indx] = 11;
1773
1774		return (stcb);
1775	}
1776	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1777	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1778	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1779	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1780	    cookie->tie_tag_peer_vtag != 0) {
1781		struct sctpasochead *head;
1782
1783		if (asoc->peer_supports_nat) {
1784			/*
1785			 * This is a gross gross hack. just call the
1786			 * cookie_new code since we are allowing a duplicate
1787			 * association. I hope this works...
1788			 */
1789			return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
1790			    inp, netp, init_src, notification,
1791			    auth_skipped, auth_offset, auth_len,
1792			    vrf_id, port));
1793		}
1794		/*
1795		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1796		 */
1797		/* temp code */
1798		if (how_indx < sizeof(asoc->cookie_how))
1799			asoc->cookie_how[how_indx] = 12;
1800		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1801		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1802
1803		*sac_assoc_id = sctp_get_associd(stcb);
1804		/* notify upper layer */
1805		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1806		atomic_add_int(&stcb->asoc.refcnt, 1);
1807		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1808		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1809		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1810			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1811		}
1812		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1813			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1814		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1815			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1816		}
1817		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1818			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1819			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1820			    stcb->sctp_ep, stcb, asoc->primary_destination);
1821
1822		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1823			/* move to OPEN state, if not in SHUTDOWN_SENT */
1824			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1825		}
1826		asoc->pre_open_streams =
1827		    ntohs(initack_cp->init.num_outbound_streams);
1828		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1829		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1830		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1831
1832		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1833
1834		asoc->str_reset_seq_in = asoc->init_seq_number;
1835
1836		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1837		if (asoc->mapping_array) {
1838			memset(asoc->mapping_array, 0,
1839			    asoc->mapping_array_size);
1840		}
1841		if (asoc->nr_mapping_array) {
1842			memset(asoc->nr_mapping_array, 0,
1843			    asoc->mapping_array_size);
1844		}
1845		SCTP_TCB_UNLOCK(stcb);
1846		SCTP_INP_INFO_WLOCK();
1847		SCTP_INP_WLOCK(stcb->sctp_ep);
1848		SCTP_TCB_LOCK(stcb);
1849		atomic_add_int(&stcb->asoc.refcnt, -1);
1850		/* send up all the data */
1851		SCTP_TCB_SEND_LOCK(stcb);
1852
1853		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1854		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1855			stcb->asoc.strmout[i].stream_no = i;
1856			stcb->asoc.strmout[i].next_sequence_sent = 0;
1857			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1858		}
1859		/* process the INIT-ACK info (my info) */
1860		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1861		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1862
1863		/* pull from vtag hash */
1864		LIST_REMOVE(stcb, sctp_asocs);
1865		/* re-insert to new vtag position */
1866		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1867		    SCTP_BASE_INFO(hashasocmark))];
1868		/*
1869		 * put it in the bucket in the vtag hash of assoc's for the
1870		 * system
1871		 */
1872		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1873
1874		/* process the INIT info (peer's info) */
1875		SCTP_TCB_SEND_UNLOCK(stcb);
1876		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1877		SCTP_INP_INFO_WUNLOCK();
1878
1879		retval = sctp_process_init(init_cp, stcb, net);
1880		if (retval < 0) {
1881			if (how_indx < sizeof(asoc->cookie_how))
1882				asoc->cookie_how[how_indx] = 13;
1883
1884			return (NULL);
1885		}
1886		/*
1887		 * since we did not send a HB make sure we don't double
1888		 * things
1889		 */
1890		net->hb_responded = 1;
1891
1892		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1893		    init_offset + sizeof(struct sctp_init_chunk),
1894		    initack_offset, sh, init_src)) {
1895			if (how_indx < sizeof(asoc->cookie_how))
1896				asoc->cookie_how[how_indx] = 14;
1897
1898			return (NULL);
1899		}
1900		/* respond with a COOKIE-ACK */
1901		sctp_stop_all_cookie_timers(stcb);
1902		sctp_toss_old_cookies(stcb, asoc);
1903		sctp_send_cookie_ack(stcb);
1904		if (how_indx < sizeof(asoc->cookie_how))
1905			asoc->cookie_how[how_indx] = 15;
1906
1907		return (stcb);
1908	}
1909	if (how_indx < sizeof(asoc->cookie_how))
1910		asoc->cookie_how[how_indx] = 16;
1911	/* all other cases... */
1912	return (NULL);
1913}
1914
1915
1916/*
1917 * handle a state cookie for a new association m: input packet mbuf chain--
1918 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1919 * and the cookie signature does not exist offset: offset into mbuf to the
1920 * cookie-echo chunk length: length of the cookie chunk to: where the init
1921 * was from returns a new TCB
1922 */
1923struct sctp_tcb *
1924sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1925    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1926    struct sctp_inpcb *inp, struct sctp_nets **netp,
1927    struct sockaddr *init_src, int *notification,
1928    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1929    uint32_t vrf_id, uint16_t port)
1930{
1931	struct sctp_tcb *stcb;
1932	struct sctp_init_chunk *init_cp, init_buf;
1933	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1934	struct sockaddr_storage sa_store;
1935	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1936	struct sockaddr_in *sin;
1937	struct sockaddr_in6 *sin6;
1938	struct sctp_association *asoc;
1939	int chk_length;
1940	int init_offset, initack_offset, initack_limit;
1941	int retval;
1942	int error = 0;
1943	uint32_t old_tag;
1944	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1945
1946#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1947	struct socket *so;
1948
1949	so = SCTP_INP_SO(inp);
1950#endif
1951
1952	/*
1953	 * find and validate the INIT chunk in the cookie (peer's info) the
1954	 * INIT should start after the cookie-echo header struct (chunk
1955	 * header, state cookie header struct)
1956	 */
1957	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1958	init_cp = (struct sctp_init_chunk *)
1959	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1960	    (uint8_t *) & init_buf);
1961	if (init_cp == NULL) {
1962		/* could not pull a INIT chunk in cookie */
1963		SCTPDBG(SCTP_DEBUG_INPUT1,
1964		    "process_cookie_new: could not pull INIT chunk hdr\n");
1965		return (NULL);
1966	}
1967	chk_length = ntohs(init_cp->ch.chunk_length);
1968	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1969		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1970		return (NULL);
1971	}
1972	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1973	/*
1974	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1975	 * INIT-ACK follows the INIT chunk
1976	 */
1977	initack_cp = (struct sctp_init_ack_chunk *)
1978	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1979	    (uint8_t *) & initack_buf);
1980	if (initack_cp == NULL) {
1981		/* could not pull INIT-ACK chunk in cookie */
1982		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1983		return (NULL);
1984	}
1985	chk_length = ntohs(initack_cp->ch.chunk_length);
1986	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1987		return (NULL);
1988	}
1989	/*
1990	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1991	 * "initack_limit" value.  This is because the chk_length field
1992	 * includes the length of the cookie, but the cookie is omitted when
1993	 * the INIT and INIT_ACK are tacked onto the cookie...
1994	 */
1995	initack_limit = offset + cookie_len;
1996
1997	/*
1998	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
1999	 * and popluate
2000	 */
2001
2002	/*
2003	 * Here we do a trick, we set in NULL for the proc/thread argument.
2004	 * We do this since in effect we only use the p argument when the
2005	 * socket is unbound and we must do an implicit bind. Since we are
2006	 * getting a cookie, we cannot be unbound.
2007	 */
2008	stcb = sctp_aloc_assoc(inp, init_src, &error,
2009	    ntohl(initack_cp->init.initiate_tag), vrf_id,
2010	    (struct thread *)NULL
2011	    );
2012	if (stcb == NULL) {
2013		struct mbuf *op_err;
2014
2015		/* memory problem? */
2016		SCTPDBG(SCTP_DEBUG_INPUT1,
2017		    "process_cookie_new: no room for another TCB!\n");
2018		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2019
2020		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2021		    sh, op_err, vrf_id, port);
2022		return (NULL);
2023	}
2024	/* get the correct sctp_nets */
2025	if (netp)
2026		*netp = sctp_findnet(stcb, init_src);
2027
2028	asoc = &stcb->asoc;
2029	/* get scope variables out of cookie */
2030	asoc->ipv4_local_scope = cookie->ipv4_scope;
2031	asoc->site_scope = cookie->site_scope;
2032	asoc->local_scope = cookie->local_scope;
2033	asoc->loopback_scope = cookie->loopback_scope;
2034
2035	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2036	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2037		struct mbuf *op_err;
2038
2039		/*
2040		 * Houston we have a problem. The EP changed while the
2041		 * cookie was in flight. Only recourse is to abort the
2042		 * association.
2043		 */
2044		atomic_add_int(&stcb->asoc.refcnt, 1);
2045		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2046		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2047		    sh, op_err, vrf_id, port);
2048#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2049		SCTP_TCB_UNLOCK(stcb);
2050		SCTP_SOCKET_LOCK(so, 1);
2051		SCTP_TCB_LOCK(stcb);
2052#endif
2053		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2054		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2055#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2056		SCTP_SOCKET_UNLOCK(so, 1);
2057#endif
2058		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2059		return (NULL);
2060	}
2061	/* process the INIT-ACK info (my info) */
2062	old_tag = asoc->my_vtag;
2063	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2064	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2065	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
2066	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2067	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2068	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2069	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2070	asoc->str_reset_seq_in = asoc->init_seq_number;
2071
2072	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2073
2074	/* process the INIT info (peer's info) */
2075	if (netp)
2076		retval = sctp_process_init(init_cp, stcb, *netp);
2077	else
2078		retval = 0;
2079	if (retval < 0) {
2080		atomic_add_int(&stcb->asoc.refcnt, 1);
2081#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2082		SCTP_TCB_UNLOCK(stcb);
2083		SCTP_SOCKET_LOCK(so, 1);
2084		SCTP_TCB_LOCK(stcb);
2085#endif
2086		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2087#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2088		SCTP_SOCKET_UNLOCK(so, 1);
2089#endif
2090		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2091		return (NULL);
2092	}
2093	/* load all addresses */
2094	if (sctp_load_addresses_from_init(stcb, m, iphlen,
2095	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
2096	    init_src)) {
2097		atomic_add_int(&stcb->asoc.refcnt, 1);
2098#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2099		SCTP_TCB_UNLOCK(stcb);
2100		SCTP_SOCKET_LOCK(so, 1);
2101		SCTP_TCB_LOCK(stcb);
2102#endif
2103		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
2104#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2105		SCTP_SOCKET_UNLOCK(so, 1);
2106#endif
2107		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2108		return (NULL);
2109	}
2110	/*
2111	 * verify any preceding AUTH chunk that was skipped
2112	 */
2113	/* pull the local authentication parameters from the cookie/init-ack */
2114	sctp_auth_get_cookie_params(stcb, m,
2115	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2116	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2117	if (auth_skipped) {
2118		struct sctp_auth_chunk *auth;
2119
2120		auth = (struct sctp_auth_chunk *)
2121		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2122		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2123			/* auth HMAC failed, dump the assoc and packet */
2124			SCTPDBG(SCTP_DEBUG_AUTH1,
2125			    "COOKIE-ECHO: AUTH failed\n");
2126			atomic_add_int(&stcb->asoc.refcnt, 1);
2127#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2128			SCTP_TCB_UNLOCK(stcb);
2129			SCTP_SOCKET_LOCK(so, 1);
2130			SCTP_TCB_LOCK(stcb);
2131#endif
2132			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
2133#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2134			SCTP_SOCKET_UNLOCK(so, 1);
2135#endif
2136			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2137			return (NULL);
2138		} else {
2139			/* remaining chunks checked... good to go */
2140			stcb->asoc.authenticated = 1;
2141		}
2142	}
2143	/* update current state */
2144	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2145	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2146	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2147		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2148		    stcb->sctp_ep, stcb, asoc->primary_destination);
2149	}
2150	sctp_stop_all_cookie_timers(stcb);
2151	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2152	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2153
2154	/*
2155	 * if we're doing ASCONFs, check to see if we have any new local
2156	 * addresses that need to get added to the peer (eg. addresses
2157	 * changed while cookie echo in flight).  This needs to be done
2158	 * after we go to the OPEN state to do the correct asconf
2159	 * processing. else, make sure we have the correct addresses in our
2160	 * lists
2161	 */
2162
2163	/* warning, we re-use sin, sin6, sa_store here! */
2164	/* pull in local_address (our "from" address) */
2165	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
2166		/* source addr is IPv4 */
2167		sin = (struct sockaddr_in *)initack_src;
2168		memset(sin, 0, sizeof(*sin));
2169		sin->sin_family = AF_INET;
2170		sin->sin_len = sizeof(struct sockaddr_in);
2171		sin->sin_addr.s_addr = cookie->laddress[0];
2172	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
2173		/* source addr is IPv6 */
2174		sin6 = (struct sockaddr_in6 *)initack_src;
2175		memset(sin6, 0, sizeof(*sin6));
2176		sin6->sin6_family = AF_INET6;
2177		sin6->sin6_len = sizeof(struct sockaddr_in6);
2178		sin6->sin6_scope_id = cookie->scope_id;
2179		memcpy(&sin6->sin6_addr, cookie->laddress,
2180		    sizeof(sin6->sin6_addr));
2181	} else {
2182		atomic_add_int(&stcb->asoc.refcnt, 1);
2183#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2184		SCTP_TCB_UNLOCK(stcb);
2185		SCTP_SOCKET_LOCK(so, 1);
2186		SCTP_TCB_LOCK(stcb);
2187#endif
2188		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2189#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2190		SCTP_SOCKET_UNLOCK(so, 1);
2191#endif
2192		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2193		return (NULL);
2194	}
2195
2196	/* set up to notify upper layer */
2197	*notification = SCTP_NOTIFY_ASSOC_UP;
2198	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2199	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2200	    (inp->sctp_socket->so_qlimit == 0)) {
2201		/*
2202		 * This is an endpoint that called connect() how it got a
2203		 * cookie that is NEW is a bit of a mystery. It must be that
2204		 * the INIT was sent, but before it got there.. a complete
2205		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
2206		 * should have went to the other code.. not here.. oh well..
2207		 * a bit of protection is worth having..
2208		 */
2209		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2210#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2211		atomic_add_int(&stcb->asoc.refcnt, 1);
2212		SCTP_TCB_UNLOCK(stcb);
2213		SCTP_SOCKET_LOCK(so, 1);
2214		SCTP_TCB_LOCK(stcb);
2215		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2216		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2217			SCTP_SOCKET_UNLOCK(so, 1);
2218			return (NULL);
2219		}
2220#endif
2221		soisconnected(stcb->sctp_socket);
2222#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2223		SCTP_SOCKET_UNLOCK(so, 1);
2224#endif
2225	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2226	    (inp->sctp_socket->so_qlimit)) {
2227		/*
2228		 * We don't want to do anything with this one. Since it is
2229		 * the listening guy. The timer will get started for
2230		 * accepted connections in the caller.
2231		 */
2232		;
2233	}
2234	/* since we did not send a HB make sure we don't double things */
2235	if ((netp) && (*netp))
2236		(*netp)->hb_responded = 1;
2237
2238	if (stcb->asoc.sctp_autoclose_ticks &&
2239	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2240		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2241	}
2242	/* calculate the RTT */
2243	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2244	if ((netp) && (*netp)) {
2245		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
2246		    &cookie->time_entered, sctp_align_unsafe_makecopy);
2247	}
2248	/* respond with a COOKIE-ACK */
2249	sctp_send_cookie_ack(stcb);
2250
2251	/*
2252	 * check the address lists for any ASCONFs that need to be sent
2253	 * AFTER the cookie-ack is sent
2254	 */
2255	sctp_check_address_list(stcb, m,
2256	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2257	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2258	    initack_src, cookie->local_scope, cookie->site_scope,
2259	    cookie->ipv4_scope, cookie->loopback_scope);
2260
2261
2262	return (stcb);
2263}
2264
2265/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
 * we NEED to make sure we are not already using the vtag. If so we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG. No middle box bit!
2269	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2270							    SCTP_BASE_INFO(hashasocmark))];
2271	LIST_FOREACH(stcb, head, sctp_asocs) {
2272	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2273		       -- SEND ABORT - TRY AGAIN --
2274		}
2275	}
2276*/
2277
2278/*
2279 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2280 * existing (non-NULL) TCB
2281 */
2282static struct mbuf *
2283sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2284    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2285    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2286    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2287    struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
2288{
2289	struct sctp_state_cookie *cookie;
2290	struct sockaddr_in6 sin6;
2291	struct sockaddr_in sin;
2292	struct sctp_tcb *l_stcb = *stcb;
2293	struct sctp_inpcb *l_inp;
2294	struct sockaddr *to;
2295	sctp_assoc_t sac_restart_id;
2296	struct sctp_pcb *ep;
2297	struct mbuf *m_sig;
2298	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2299	uint8_t *sig;
2300	uint8_t cookie_ok = 0;
2301	unsigned int size_of_pkt, sig_offset, cookie_offset;
2302	unsigned int cookie_len;
2303	struct timeval now;
2304	struct timeval time_expires;
2305	struct sockaddr_storage dest_store;
2306	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2307	struct ip *iph;
2308	int notification = 0;
2309	struct sctp_nets *netl;
2310	int had_a_existing_tcb = 0;
2311	int send_int_conf = 0;
2312
2313	SCTPDBG(SCTP_DEBUG_INPUT2,
2314	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2315
2316	if (inp_p == NULL) {
2317		return (NULL);
2318	}
2319	/* First get the destination address setup too. */
2320	iph = mtod(m, struct ip *);
2321	switch (iph->ip_v) {
2322	case IPVERSION:
2323		{
2324			/* its IPv4 */
2325			struct sockaddr_in *lsin;
2326
2327			lsin = (struct sockaddr_in *)(localep_sa);
2328			memset(lsin, 0, sizeof(*lsin));
2329			lsin->sin_family = AF_INET;
2330			lsin->sin_len = sizeof(*lsin);
2331			lsin->sin_port = sh->dest_port;
2332			lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2333			size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2334			break;
2335		}
2336#ifdef INET6
2337	case IPV6_VERSION >> 4:
2338		{
2339			/* its IPv6 */
2340			struct ip6_hdr *ip6;
2341			struct sockaddr_in6 *lsin6;
2342
2343			lsin6 = (struct sockaddr_in6 *)(localep_sa);
2344			memset(lsin6, 0, sizeof(*lsin6));
2345			lsin6->sin6_family = AF_INET6;
2346			lsin6->sin6_len = sizeof(struct sockaddr_in6);
2347			ip6 = mtod(m, struct ip6_hdr *);
2348			lsin6->sin6_port = sh->dest_port;
2349			lsin6->sin6_addr = ip6->ip6_dst;
2350			size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2351			break;
2352		}
2353#endif
2354	default:
2355		return (NULL);
2356	}
2357
2358	cookie = &cp->cookie;
2359	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2360	cookie_len = ntohs(cp->ch.chunk_length);
2361
2362	if ((cookie->peerport != sh->src_port) &&
2363	    (cookie->myport != sh->dest_port) &&
2364	    (cookie->my_vtag != sh->v_tag)) {
2365		/*
2366		 * invalid ports or bad tag.  Note that we always leave the
2367		 * v_tag in the header in network order and when we stored
2368		 * it in the my_vtag slot we also left it in network order.
2369		 * This maintains the match even though it may be in the
2370		 * opposite byte order of the machine :->
2371		 */
2372		return (NULL);
2373	}
2374	if (cookie_len > size_of_pkt ||
2375	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2376	    sizeof(struct sctp_init_chunk) +
2377	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2378		/* cookie too long!  or too small */
2379		return (NULL);
2380	}
2381	/*
2382	 * split off the signature into its own mbuf (since it should not be
2383	 * calculated in the sctp_hmac_m() call).
2384	 */
2385	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2386	if (sig_offset > size_of_pkt) {
2387		/* packet not correct size! */
2388		/* XXX this may already be accounted for earlier... */
2389		return (NULL);
2390	}
2391	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2392	if (m_sig == NULL) {
2393		/* out of memory or ?? */
2394		return (NULL);
2395	}
2396#ifdef SCTP_MBUF_LOGGING
2397	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2398		struct mbuf *mat;
2399
2400		mat = m_sig;
2401		while (mat) {
2402			if (SCTP_BUF_IS_EXTENDED(mat)) {
2403				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2404			}
2405			mat = SCTP_BUF_NEXT(mat);
2406		}
2407	}
2408#endif
2409
2410	/*
2411	 * compute the signature/digest for the cookie
2412	 */
2413	ep = &(*inp_p)->sctp_ep;
2414	l_inp = *inp_p;
2415	if (l_stcb) {
2416		SCTP_TCB_UNLOCK(l_stcb);
2417	}
2418	SCTP_INP_RLOCK(l_inp);
2419	if (l_stcb) {
2420		SCTP_TCB_LOCK(l_stcb);
2421	}
2422	/* which cookie is it? */
2423	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2424	    (ep->current_secret_number != ep->last_secret_number)) {
2425		/* it's the old cookie */
2426		(void)sctp_hmac_m(SCTP_HMAC,
2427		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2428		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2429	} else {
2430		/* it's the current cookie */
2431		(void)sctp_hmac_m(SCTP_HMAC,
2432		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2433		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2434	}
2435	/* get the signature */
2436	SCTP_INP_RUNLOCK(l_inp);
2437	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2438	if (sig == NULL) {
2439		/* couldn't find signature */
2440		sctp_m_freem(m_sig);
2441		return (NULL);
2442	}
2443	/* compare the received digest with the computed digest */
2444	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2445		/* try the old cookie? */
2446		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2447		    (ep->current_secret_number != ep->last_secret_number)) {
2448			/* compute digest with old */
2449			(void)sctp_hmac_m(SCTP_HMAC,
2450			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2451			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2452			/* compare */
2453			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2454				cookie_ok = 1;
2455		}
2456	} else {
2457		cookie_ok = 1;
2458	}
2459
2460	/*
2461	 * Now before we continue we must reconstruct our mbuf so that
2462	 * normal processing of any other chunks will work.
2463	 */
2464	{
2465		struct mbuf *m_at;
2466
2467		m_at = m;
2468		while (SCTP_BUF_NEXT(m_at) != NULL) {
2469			m_at = SCTP_BUF_NEXT(m_at);
2470		}
2471		SCTP_BUF_NEXT(m_at) = m_sig;
2472	}
2473
2474	if (cookie_ok == 0) {
2475		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2476		SCTPDBG(SCTP_DEBUG_INPUT2,
2477		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2478		    (uint32_t) offset, cookie_offset, sig_offset);
2479		return (NULL);
2480	}
2481	/*
2482	 * check the cookie timestamps to be sure it's not stale
2483	 */
2484	(void)SCTP_GETTIME_TIMEVAL(&now);
2485	/* Expire time is in Ticks, so we convert to seconds */
2486	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2487	time_expires.tv_usec = cookie->time_entered.tv_usec;
2488	/*
2489	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2490	 * is undefined.
2491	 */
2492	if (timevalcmp(&now, &time_expires, >)) {
2493		/* cookie is stale! */
2494		struct mbuf *op_err;
2495		struct sctp_stale_cookie_msg *scm;
2496		uint32_t tim;
2497
2498		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2499		    0, M_DONTWAIT, 1, MT_DATA);
2500		if (op_err == NULL) {
2501			/* FOOBAR */
2502			return (NULL);
2503		}
2504		/* Set the len */
2505		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2506		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2507		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2508		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2509		    (sizeof(uint32_t))));
2510		/* seconds to usec */
2511		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2512		/* add in usec */
2513		if (tim == 0)
2514			tim = now.tv_usec - cookie->time_entered.tv_usec;
2515		scm->time_usec = htonl(tim);
2516		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2517		    vrf_id, port);
2518		return (NULL);
2519	}
2520	/*
2521	 * Now we must see with the lookup address if we have an existing
2522	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2523	 * and a INIT collided with us and somewhere the peer sent the
2524	 * cookie on another address besides the single address our assoc
2525	 * had for him. In this case we will have one of the tie-tags set at
2526	 * least AND the address field in the cookie can be used to look it
2527	 * up.
2528	 */
2529	to = NULL;
2530	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2531		memset(&sin6, 0, sizeof(sin6));
2532		sin6.sin6_family = AF_INET6;
2533		sin6.sin6_len = sizeof(sin6);
2534		sin6.sin6_port = sh->src_port;
2535		sin6.sin6_scope_id = cookie->scope_id;
2536		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2537		    sizeof(sin6.sin6_addr.s6_addr));
2538		to = (struct sockaddr *)&sin6;
2539	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2540		memset(&sin, 0, sizeof(sin));
2541		sin.sin_family = AF_INET;
2542		sin.sin_len = sizeof(sin);
2543		sin.sin_port = sh->src_port;
2544		sin.sin_addr.s_addr = cookie->address[0];
2545		to = (struct sockaddr *)&sin;
2546	} else {
2547		/* This should not happen */
2548		return (NULL);
2549	}
2550	if ((*stcb == NULL) && to) {
2551		/* Yep, lets check */
2552		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2553		if (*stcb == NULL) {
2554			/*
2555			 * We should have only got back the same inp. If we
2556			 * got back a different ep we have a problem. The
2557			 * original findep got back l_inp and now
2558			 */
2559			if (l_inp != *inp_p) {
2560				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2561			}
2562		} else {
2563			if (*locked_tcb == NULL) {
2564				/*
2565				 * In this case we found the assoc only
2566				 * after we locked the create lock. This
2567				 * means we are in a colliding case and we
2568				 * must make sure that we unlock the tcb if
2569				 * its one of the cases where we throw away
2570				 * the incoming packets.
2571				 */
2572				*locked_tcb = *stcb;
2573
2574				/*
2575				 * We must also increment the inp ref count
2576				 * since the ref_count flags was set when we
2577				 * did not find the TCB, now we found it
2578				 * which reduces the refcount.. we must
2579				 * raise it back out to balance it all :-)
2580				 */
2581				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2582				if ((*stcb)->sctp_ep != l_inp) {
2583					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2584					    (*stcb)->sctp_ep, l_inp);
2585				}
2586			}
2587		}
2588	}
2589	if (to == NULL) {
2590		return (NULL);
2591	}
2592	cookie_len -= SCTP_SIGNATURE_SIZE;
2593	if (*stcb == NULL) {
2594		/* this is the "normal" case... get a new TCB */
2595		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2596		    cookie_len, *inp_p, netp, to, &notification,
2597		    auth_skipped, auth_offset, auth_len, vrf_id, port);
2598	} else {
2599		/* this is abnormal... cookie-echo on existing TCB */
2600		had_a_existing_tcb = 1;
2601		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2602		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2603		    &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
2604	}
2605
2606	if (*stcb == NULL) {
2607		/* still no TCB... must be bad cookie-echo */
2608		return (NULL);
2609	}
2610	/*
2611	 * Ok, we built an association so confirm the address we sent the
2612	 * INIT-ACK to.
2613	 */
2614	netl = sctp_findnet(*stcb, to);
2615	/*
2616	 * This code should in theory NOT run but
2617	 */
2618	if (netl == NULL) {
2619		/* TSNH! Huh, why do I need to add this address here? */
2620		int ret;
2621
2622		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2623		    SCTP_IN_COOKIE_PROC);
2624		netl = sctp_findnet(*stcb, to);
2625	}
2626	if (netl) {
2627		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2628			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2629			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2630			    netl);
2631			send_int_conf = 1;
2632		}
2633	}
2634	if (*stcb) {
2635		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2636		    *stcb, NULL);
2637	}
2638	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2639		if (!had_a_existing_tcb ||
2640		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2641			/*
2642			 * If we have a NEW cookie or the connect never
2643			 * reached the connected state during collision we
2644			 * must do the TCP accept thing.
2645			 */
2646			struct socket *so, *oso;
2647			struct sctp_inpcb *inp;
2648
2649			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2650				/*
2651				 * For a restart we will keep the same
2652				 * socket, no need to do anything. I THINK!!
2653				 */
2654				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2655				if (send_int_conf) {
2656					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2657					    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2658				}
2659				return (m);
2660			}
2661			oso = (*inp_p)->sctp_socket;
2662			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2663			SCTP_TCB_UNLOCK((*stcb));
2664			so = sonewconn(oso, 0
2665			    );
2666			SCTP_TCB_LOCK((*stcb));
2667			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2668
2669			if (so == NULL) {
2670				struct mbuf *op_err;
2671
2672#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2673				struct socket *pcb_so;
2674
2675#endif
2676				/* Too many sockets */
2677				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2678				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2679				sctp_abort_association(*inp_p, NULL, m, iphlen,
2680				    sh, op_err, vrf_id, port);
2681#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2682				pcb_so = SCTP_INP_SO(*inp_p);
2683				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2684				SCTP_TCB_UNLOCK((*stcb));
2685				SCTP_SOCKET_LOCK(pcb_so, 1);
2686				SCTP_TCB_LOCK((*stcb));
2687				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2688#endif
2689				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2690#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2691				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2692#endif
2693				return (NULL);
2694			}
2695			inp = (struct sctp_inpcb *)so->so_pcb;
2696			SCTP_INP_INCR_REF(inp);
2697			/*
2698			 * We add the unbound flag here so that if we get an
2699			 * soabort() before we get the move_pcb done, we
2700			 * will properly cleanup.
2701			 */
2702			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2703			    SCTP_PCB_FLAGS_CONNECTED |
2704			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2705			    SCTP_PCB_FLAGS_UNBOUND |
2706			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2707			    SCTP_PCB_FLAGS_DONT_WAKE);
2708			inp->sctp_features = (*inp_p)->sctp_features;
2709			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2710			inp->sctp_socket = so;
2711			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2712			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2713			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2714			inp->sctp_context = (*inp_p)->sctp_context;
2715			inp->inp_starting_point_for_iterator = NULL;
2716			/*
2717			 * copy in the authentication parameters from the
2718			 * original endpoint
2719			 */
2720			if (inp->sctp_ep.local_hmacs)
2721				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2722			inp->sctp_ep.local_hmacs =
2723			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2724			if (inp->sctp_ep.local_auth_chunks)
2725				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2726			inp->sctp_ep.local_auth_chunks =
2727			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2728
2729			/*
2730			 * Now we must move it from one hash table to
2731			 * another and get the tcb in the right place.
2732			 */
2733
2734			/*
2735			 * This is where the one-2-one socket is put into
2736			 * the accept state waiting for the accept!
2737			 */
2738			if (*stcb) {
2739				(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
2740			}
2741			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2742
2743			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2744			SCTP_TCB_UNLOCK((*stcb));
2745
2746			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2747			    0);
2748			SCTP_TCB_LOCK((*stcb));
2749			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2750
2751
2752			/*
2753			 * now we must check to see if we were aborted while
2754			 * the move was going on and the lock/unlock
2755			 * happened.
2756			 */
2757			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2758				/*
2759				 * yep it was, we leave the assoc attached
2760				 * to the socket since the sctp_inpcb_free()
2761				 * call will send an abort for us.
2762				 */
2763				SCTP_INP_DECR_REF(inp);
2764				return (NULL);
2765			}
2766			SCTP_INP_DECR_REF(inp);
2767			/* Switch over to the new guy */
2768			*inp_p = inp;
2769			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2770			if (send_int_conf) {
2771				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2772				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2773			}
2774			/*
2775			 * Pull it from the incomplete queue and wake the
2776			 * guy
2777			 */
2778#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2779			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2780			SCTP_TCB_UNLOCK((*stcb));
2781			SCTP_SOCKET_LOCK(so, 1);
2782#endif
2783			soisconnected(so);
2784#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2785			SCTP_TCB_LOCK((*stcb));
2786			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2787			SCTP_SOCKET_UNLOCK(so, 1);
2788#endif
2789			return (m);
2790		}
2791	}
2792	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
2793		if (notification) {
2794			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2795		}
2796		if (send_int_conf) {
2797			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2798			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2799		}
2800	}
2801	return (m);
2802}
2803
/*
 * Handle an incoming COOKIE-ACK chunk: if we are in the COOKIE-ECHOED
 * state, move the association to OPEN, notify the ULP that the assoc is
 * up, and (re)start the timers that depend on an established association.
 * Finally toss any stored cookies and restart the send timer if data is
 * still outstanding.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			/*
			 * A shutdown was requested while the handshake was
			 * still in progress; arm the guard timer now.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO, but only if no errors occurred on the assoc */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock ordering: drop the TCB lock (holding a
			 * refcount so the assoc cannot go away) before
			 * taking the socket lock, then re-take the TCB
			 * lock and re-check the socket state.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket was closed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
2903
2904static void
2905sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2906    struct sctp_tcb *stcb)
2907{
2908	struct sctp_nets *net;
2909	struct sctp_tmit_chunk *lchk;
2910	struct sctp_ecne_chunk bkup;
2911	uint8_t override_bit = 0;
2912	uint32_t tsn, window_data_tsn;
2913	int len;
2914	int pkt_cnt;
2915
2916	len = ntohs(cp->ch.chunk_length);
2917	if ((len != sizeof(struct sctp_ecne_chunk)) &&
2918	    (len != sizeof(struct old_sctp_ecne_chunk))) {
2919		return;
2920	}
2921	if (len == sizeof(struct old_sctp_ecne_chunk)) {
2922		/* Its the old format */
2923		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
2924		bkup.num_pkts_since_cwr = htonl(1);
2925		cp = &bkup;
2926	}
2927	SCTP_STAT_INCR(sctps_recvecne);
2928	tsn = ntohl(cp->tsn);
2929	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
2930	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
2931	if (lchk == NULL) {
2932		window_data_tsn = stcb->asoc.sending_seq - 1;
2933	} else {
2934		window_data_tsn = lchk->rec.data.TSN_seq;
2935	}
2936
2937	/* Find where it was sent to if possible. */
2938	net = NULL;
2939	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
2940		if (lchk->rec.data.TSN_seq == tsn) {
2941			net = lchk->whoTo;
2942			break;
2943		}
2944		if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
2945			break;
2946		}
2947	}
2948	if (net == NULL) {
2949		/*
2950		 * What to do. A previous send of a CWR was possibly lost.
2951		 * See how old it is, we may have it marked on the actual
2952		 * net.
2953		 */
2954		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2955			if (tsn == net->last_cwr_tsn) {
2956				/* Found him, send it off */
2957				goto out;
2958			}
2959		}
2960		/*
2961		 * If we reach here, we need to send a special CWR that says
2962		 * hey, we did this a long time ago and you lost the
2963		 * response.
2964		 */
2965		net = TAILQ_FIRST(&stcb->asoc.nets);
2966		override_bit = SCTP_CWR_REDUCE_OVERRIDE;
2967	}
2968out:
2969	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
2970	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
2971		/*
2972		 * JRS - Use the congestion control given in the pluggable
2973		 * CC module
2974		 */
2975		int ocwnd;
2976
2977		ocwnd = net->cwnd;
2978		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
2979		/*
2980		 * We reduce once every RTT. So we will only lower cwnd at
2981		 * the next sending seq i.e. the window_data_tsn
2982		 */
2983		net->cwr_window_tsn = window_data_tsn;
2984		net->ecn_ce_pkt_cnt += pkt_cnt;
2985		net->lost_cnt = pkt_cnt;
2986		net->last_cwr_tsn = tsn;
2987	} else {
2988		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
2989		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
2990		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
2991			/*
2992			 * Another loss in the same window update how many
2993			 * marks/packets lost we have had.
2994			 */
2995			int cnt = 1;
2996
2997			if (pkt_cnt > net->lost_cnt) {
2998				/* Should be the case */
2999				cnt = (pkt_cnt - net->lost_cnt);
3000				net->ecn_ce_pkt_cnt += cnt;
3001			}
3002			net->lost_cnt = pkt_cnt;
3003			net->last_cwr_tsn = tsn;
3004			/*
3005			 * Most CC functions will ignore this call, since we
3006			 * are in-window yet of the initial CE the peer saw.
3007			 */
3008			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
3009		}
3010	}
3011	/*
3012	 * We always send a CWR this way if our previous one was lost our
3013	 * peer will get an update, or if it is not time again to reduce we
3014	 * still get the cwr to the peer. Note we set the override when we
3015	 * could not find the TSN on the chunk or the destination network.
3016	 */
3017	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
3018}
3019
3020static void
3021sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3022{
3023	/*
3024	 * Here we get a CWR from the peer. We must look in the outqueue and
3025	 * make sure that we have a covered ECNE in teh control chunk part.
3026	 * If so remove it.
3027	 */
3028	struct sctp_tmit_chunk *chk;
3029	struct sctp_ecne_chunk *ecne;
3030	int override;
3031	uint32_t cwr_tsn;
3032
3033	cwr_tsn = ntohl(cp->tsn);
3034
3035	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3036	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
3037		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3038			continue;
3039		}
3040		if ((override == 0) && (chk->whoTo != net)) {
3041			/* Must be from the right src unless override is set */
3042			continue;
3043		}
3044		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3045		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3046			/* this covers this ECNE, we can remove it */
3047			stcb->asoc.ecn_echo_cnt_onq--;
3048			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3049			    sctp_next);
3050			if (chk->data) {
3051				sctp_m_freem(chk->data);
3052				chk->data = NULL;
3053			}
3054			stcb->asoc.ctrl_queue_cnt--;
3055			sctp_free_a_chunk(stcb, chk);
3056			if (override == 0) {
3057				break;
3058			}
3059		}
3060	}
3061}
3062
/*
 * Handle an incoming SHUTDOWN-COMPLETE chunk.  Only valid while we are
 * in the SHUTDOWN-ACK-SENT state; otherwise it is ignored (the TCB lock
 * is released either way).  On success the ULP is notified, the
 * shutdown-ack timer is stopped and the association is freed.  The
 * chunk itself (cp) is not examined.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: drop the TCB lock (holding a refcount so the
	 * assoc cannot go away) before taking the socket lock, then
	 * re-take the TCB lock.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3118
/*
 * Process one chunk descriptor carried in a PACKET-DROPPED report and
 * arrange for the dropped chunk to be retransmitted (DATA chunks are
 * marked for resend; control chunks are resent directly).  Returns 0 on
 * success, or -1 when the reflected data bytes do not match what we
 * actually sent (the report is then not trusted).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/*
				 * Only act on reports flagged as coming
				 * from a middle box or a bad CRC, and not
				 * while the peer's rwnd is closed.
				 */
				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reflected payload bytes
				 * against what we actually sent before
				 * trusting the report.
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/*
				 * audit code: recount the chunks marked
				 * for retransmission and resync the
				 * cached counter if they disagree.
				 */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			/* mark the first queued ASCONF for resend */
			struct sctp_tmit_chunk *asconf;

			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			/* mark the queued COOKIE-ECHO for resend */
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3353
3354void
3355sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3356{
3357	int i;
3358	uint16_t temp;
3359
3360	/*
3361	 * We set things to 0xffff since this is the last delivered sequence
3362	 * and we will be sending in 0 after the reset.
3363	 */
3364
3365	if (number_entries) {
3366		for (i = 0; i < number_entries; i++) {
3367			temp = ntohs(list[i]);
3368			if (temp >= stcb->asoc.streamincnt) {
3369				continue;
3370			}
3371			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3372		}
3373	} else {
3374		list = NULL;
3375		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3376			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3377		}
3378	}
3379	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3380}
3381
3382static void
3383sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3384{
3385	int i;
3386
3387	if (number_entries == 0) {
3388		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3389			stcb->asoc.strmout[i].next_sequence_sent = 0;
3390		}
3391	} else if (number_entries) {
3392		for (i = 0; i < number_entries; i++) {
3393			uint16_t temp;
3394
3395			temp = ntohs(list[i]);
3396			if (temp >= stcb->asoc.streamoutcnt) {
3397				/* no such stream */
3398				continue;
3399			}
3400			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3401		}
3402	}
3403	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3404}
3405
3406
/*
 * Locate the outstanding stream-reset request with sequence number 'seq'
 * inside the pending stream-reset chunk (asoc->str_reset); a chunk can
 * carry at most two requests.  If 'bchk' is non-NULL the chunk pointer
 * is returned through it.  Returns NULL (and clears the outstanding
 * count when nothing is queued at all) if no matching request exists.
 */
struct sctp_stream_reset_out_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	struct sctp_association *asoc;
	struct sctp_stream_reset_out_req *req;
	struct sctp_stream_reset_out_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* nothing queued - there cannot be an outstanding request */
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
	r = &req->sr_req;
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}
3451
3452static void
3453sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3454{
3455	struct sctp_association *asoc;
3456	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3457
3458	if (stcb->asoc.str_reset == NULL) {
3459		return;
3460	}
3461	asoc = &stcb->asoc;
3462
3463	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3464	TAILQ_REMOVE(&asoc->control_send_queue,
3465	    chk,
3466	    sctp_next);
3467	if (chk->data) {
3468		sctp_m_freem(chk->data);
3469		chk->data = NULL;
3470	}
3471	asoc->ctrl_queue_cnt--;
3472	sctp_free_a_chunk(stcb, chk);
3473	/* sa_ignore NO_NULL_CHK */
3474	stcb->asoc.str_reset = NULL;
3475}
3476
3477
/*
 * Process the peer's response to a stream-reset request we sent.  'seq'
 * must match our outstanding request sequence number; the matching
 * request parameter is looked up in the pending chunk and, depending on
 * its type and on 'action', the in/out streams are reset, newly added
 * streams are put into effect, or the ULP is told the request failed.
 * Returns 1 if the association was aborted while processing a TSN-reset
 * response, 0 otherwise.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				/* the stream list follows the fixed header */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_STREAMS) {
				/* Ok we now may have more streams */
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* Put the new streams into effect */
					stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize;
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * Build a synthetic FWD-TSN up to
					 * the TSN just before the peer's
					 * new sending TSN, to flush any
					 * pending data from the old epoch.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						/* assoc was aborted during the fwd-tsn */
						return (1);
					}
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					/* rebase and clear both mapping arrays */
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}
3583
3584static void
3585sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3586    struct sctp_tmit_chunk *chk,
3587    struct sctp_stream_reset_in_request *req, int trunc)
3588{
3589	uint32_t seq;
3590	int len, i;
3591	int number_entries;
3592	uint16_t temp;
3593
3594	/*
3595	 * peer wants me to send a str-reset to him for my outgoing seq's if
3596	 * seq_in is right.
3597	 */
3598	struct sctp_association *asoc = &stcb->asoc;
3599
3600	seq = ntohl(req->request_seq);
3601	if (asoc->str_reset_seq_in == seq) {
3602		if (trunc) {
3603			/* Can't do it, since they exceeded our buffer size  */
3604			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3605			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3606			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3607		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3608			len = ntohs(req->ph.param_length);
3609			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3610			for (i = 0; i < number_entries; i++) {
3611				temp = ntohs(req->list_of_streams[i]);
3612				req->list_of_streams[i] = temp;
3613			}
3614			/* move the reset action back one */
3615			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3616			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3617			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
3618			    asoc->str_reset_seq_out,
3619			    seq, (asoc->sending_seq - 1));
3620			asoc->stream_reset_out_is_outstanding = 1;
3621			asoc->str_reset = chk;
3622			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
3623			stcb->asoc.stream_reset_outstanding++;
3624		} else {
3625			/* Can't do it, since we have sent one out */
3626			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3627			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
3628			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3629		}
3630		asoc->str_reset_seq_in++;
3631	} else if (asoc->str_reset_seq_in - 1 == seq) {
3632		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3633	} else if (asoc->str_reset_seq_in - 2 == seq) {
3634		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3635	} else {
3636		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3637	}
3638}
3639
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 *
	 * Returns 1 if forward-TSN processing aborted the association (the
	 * caller must stop touching the stcb), 0 otherwise.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * New request: fabricate a FORWARD-TSN that advances the
		 * cumulative TSN past everything currently in the mapping
		 * array, so any pending data is flushed before the reset.
		 */
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			/* processing the synthetic fwd-tsn tore down the asoc */
			return (1);
		}
		/* Jump the receive window forward and clear both maps. */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		/* bump our own sending TSN as part of the reset */
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		/* report our new sending seq and receive base in the response */
		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		/* reset per-stream sequence numbers in both directions */
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: replay the last saved response */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two behind: replay the response before that */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}
3708
/*
 * Handle a peer's request to reset its outgoing streams (our inbound side).
 * If all data up to the request's TSN has already arrived, the reset is
 * applied immediately; otherwise the request is queued on resetHead and is
 * triggered later by the data-arrival path once the TSN is reached.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * NOTE(review): number_entries is derived from the wire
		 * param_length; oversized parameters are flagged by the
		 * caller via trunc (cstore-size check) and denied below.
		 */
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (trunc) {
			/* request exceeded our buffer: deny */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			/* allocation covers the header plus the stream list */
			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			/* keep a private copy of the request for later replay */
			memcpy(&liste->req, req,
			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			/* we ack PERFORMED now; the reset fires when tsn arrives */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		}
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}
3785
3786static void
3787sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3788    struct sctp_stream_reset_add_strm *str_add)
3789{
3790	/*
3791	 * Peer is requesting to add more streams. If its within our
3792	 * max-streams we will allow it.
3793	 */
3794	uint16_t num_stream, i;
3795	uint32_t seq;
3796	struct sctp_association *asoc = &stcb->asoc;
3797	struct sctp_queued_to_read *ctl, *nctl;
3798
3799	/* Get the number. */
3800	seq = ntohl(str_add->request_seq);
3801	num_stream = ntohs(str_add->number_of_streams);
3802	/* Now what would be the new total? */
3803	if (asoc->str_reset_seq_in == seq) {
3804		num_stream += stcb->asoc.streamincnt;
3805		if (num_stream > stcb->asoc.max_inbound_streams) {
3806			/* We must reject it they ask for to many */
3807	denied:
3808			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3809			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3810			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3811		} else {
3812			/* Ok, we can do that :-) */
3813			struct sctp_stream_in *oldstrm;
3814
3815			/* save off the old */
3816			oldstrm = stcb->asoc.strmin;
3817			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
3818			    (num_stream * sizeof(struct sctp_stream_in)),
3819			    SCTP_M_STRMI);
3820			if (stcb->asoc.strmin == NULL) {
3821				stcb->asoc.strmin = oldstrm;
3822				goto denied;
3823			}
3824			/* copy off the old data */
3825			for (i = 0; i < stcb->asoc.streamincnt; i++) {
3826				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3827				stcb->asoc.strmin[i].stream_no = i;
3828				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
3829				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
3830				/* now anything on those queues? */
3831				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
3832					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
3833					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
3834				}
3835			}
3836			/* Init the new streams */
3837			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
3838				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3839				stcb->asoc.strmin[i].stream_no = i;
3840				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3841				stcb->asoc.strmin[i].delivery_started = 0;
3842			}
3843			SCTP_FREE(oldstrm, SCTP_M_STRMI);
3844			/* update the size */
3845			stcb->asoc.streamincnt = num_stream;
3846			/* Send the ack */
3847			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3848			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3849			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3850			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb,
3851			    (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED);
3852		}
3853	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3854		/*
3855		 * one seq back, just echo back last action since my
3856		 * response was lost.
3857		 */
3858		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3859	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3860		/*
3861		 * two seq back, just echo back last action since my
3862		 * response was lost.
3863		 */
3864		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3865	} else {
3866		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3867
3868	}
3869}
3870
/*
 * Process an incoming STREAM-RESET chunk.  Walks each embedded parameter
 * (out/in/tsn reset requests, add-stream requests, and responses), builds
 * one STREAM-RESET response chunk, and queues it on the control send queue.
 * Returns 1 when processing caused the association to be torn down (caller
 * must not touch stcb further), 0 otherwise.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
	static int
	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
        struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];

	uint32_t seq;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/* common exit: free the (partially built) response chunk */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = stcb->asoc.primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/*
	 * Walk the parameters; the tsn-request is the smallest parameter we
	 * accept, so stop once less than that remains.
	 */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		/* first peek at just the parameter header */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/*
		 * now pull as much of the parameter as fits in cstore.
		 * NOTE(review): this second sctp_m_getptr() result is not
		 * NULL-checked before the ph->param_type dereference —
		 * confirm the mbuf chain always holds param_len bytes here.
		 */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		/* trunc flags a parameter larger than our local buffer */
		if (param_len > (int)sizeof(cstore)) {
			trunc = 1;
		} else {
			trunc = 0;
		}

		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;

			req_in = (struct sctp_stream_reset_in_request *)ph;

			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;

			/* nonzero return means the association was aborted */
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			/* nonzero return means the association was aborted */
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* unknown parameter type: stop parsing */
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4019
4020/*
4021 * Handle a router or endpoints report of a packet loss, there are two ways
4022 * to handle this, either we get the whole packet and must disect it
4023 * ourselves (possibly with truncation and or corruption) or it is a summary
4024 * from a middle box that did the disectting for us.
4025 */
4026static void
4027sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4028    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4029{
4030	uint32_t bottle_bw, on_queue;
4031	uint16_t trunc_len;
4032	unsigned int chlen;
4033	unsigned int at;
4034	struct sctp_chunk_desc desc;
4035	struct sctp_chunkhdr *ch;
4036
4037	chlen = ntohs(cp->ch.chunk_length);
4038	chlen -= sizeof(struct sctp_pktdrop_chunk);
4039	/* XXX possible chlen underflow */
4040	if (chlen == 0) {
4041		ch = NULL;
4042		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4043			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4044	} else {
4045		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4046		chlen -= sizeof(struct sctphdr);
4047		/* XXX possible chlen underflow */
4048		memset(&desc, 0, sizeof(desc));
4049	}
4050	trunc_len = (uint16_t) ntohs(cp->trunc_len);
4051	if (trunc_len > limit) {
4052		trunc_len = limit;
4053	}
4054	/* now the chunks themselves */
4055	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4056		desc.chunk_type = ch->chunk_type;
4057		/* get amount we need to move */
4058		at = ntohs(ch->chunk_length);
4059		if (at < sizeof(struct sctp_chunkhdr)) {
4060			/* corrupt chunk, maybe at the end? */
4061			SCTP_STAT_INCR(sctps_pdrpcrupt);
4062			break;
4063		}
4064		if (trunc_len == 0) {
4065			/* we are supposed to have all of it */
4066			if (at > chlen) {
4067				/* corrupt skip it */
4068				SCTP_STAT_INCR(sctps_pdrpcrupt);
4069				break;
4070			}
4071		} else {
4072			/* is there enough of it left ? */
4073			if (desc.chunk_type == SCTP_DATA) {
4074				if (chlen < (sizeof(struct sctp_data_chunk) +
4075				    sizeof(desc.data_bytes))) {
4076					break;
4077				}
4078			} else {
4079				if (chlen < sizeof(struct sctp_chunkhdr)) {
4080					break;
4081				}
4082			}
4083		}
4084		if (desc.chunk_type == SCTP_DATA) {
4085			/* can we get out the tsn? */
4086			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4087				SCTP_STAT_INCR(sctps_pdrpmbda);
4088
4089			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4090				/* yep */
4091				struct sctp_data_chunk *dcp;
4092				uint8_t *ddp;
4093				unsigned int iii;
4094
4095				dcp = (struct sctp_data_chunk *)ch;
4096				ddp = (uint8_t *) (dcp + 1);
4097				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4098					desc.data_bytes[iii] = ddp[iii];
4099				}
4100				desc.tsn_ifany = dcp->dp.tsn;
4101			} else {
4102				/* nope we are done. */
4103				SCTP_STAT_INCR(sctps_pdrpnedat);
4104				break;
4105			}
4106		} else {
4107			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4108				SCTP_STAT_INCR(sctps_pdrpmbct);
4109		}
4110
4111		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4112			SCTP_STAT_INCR(sctps_pdrppdbrk);
4113			break;
4114		}
4115		if (SCTP_SIZE32(at) > chlen) {
4116			break;
4117		}
4118		chlen -= SCTP_SIZE32(at);
4119		if (chlen < sizeof(struct sctp_chunkhdr)) {
4120			/* done, none left */
4121			break;
4122		}
4123		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4124	}
4125	/* Now update any rwnd --- possibly */
4126	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4127		/* From a peer, we get a rwnd report */
4128		uint32_t a_rwnd;
4129
4130		SCTP_STAT_INCR(sctps_pdrpfehos);
4131
4132		bottle_bw = ntohl(cp->bottle_bw);
4133		on_queue = ntohl(cp->current_onq);
4134		if (bottle_bw && on_queue) {
4135			/* a rwnd report is in here */
4136			if (bottle_bw > on_queue)
4137				a_rwnd = bottle_bw - on_queue;
4138			else
4139				a_rwnd = 0;
4140
4141			if (a_rwnd == 0)
4142				stcb->asoc.peers_rwnd = 0;
4143			else {
4144				if (a_rwnd > stcb->asoc.total_flight) {
4145					stcb->asoc.peers_rwnd =
4146					    a_rwnd - stcb->asoc.total_flight;
4147				} else {
4148					stcb->asoc.peers_rwnd = 0;
4149				}
4150				if (stcb->asoc.peers_rwnd <
4151				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4152					/* SWS sender side engages */
4153					stcb->asoc.peers_rwnd = 0;
4154				}
4155			}
4156		}
4157	} else {
4158		SCTP_STAT_INCR(sctps_pdrpfmbox);
4159	}
4160
4161	/* now middle boxes in sat networks get a cwnd bump */
4162	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4163	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4164	    (stcb->asoc.sat_network)) {
4165		/*
4166		 * This is debateable but for sat networks it makes sense
4167		 * Note if a T3 timer has went off, we will prohibit any
4168		 * changes to cwnd until we exit the t3 loss recovery.
4169		 */
4170		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4171		    net, cp, &bottle_bw, &on_queue);
4172	}
4173}
4174
4175/*
4176 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4177 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4178 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4179 * length of the complete packet outputs: - length: modified to remaining
4180 * length after control processing - netp: modified to new sctp_nets after
4181 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4182 * bad packet,...) otherwise return the tcb for this packet
4183 */
4184#ifdef __GNUC__
4185__attribute__((noinline))
4186#endif
4187	static struct sctp_tcb *
4188	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4189             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4190             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4191             uint32_t vrf_id, uint16_t port)
4192{
4193	struct sctp_association *asoc;
4194	uint32_t vtag_in;
4195	int num_chunks = 0;	/* number of control chunks processed */
4196	uint32_t chk_length;
4197	int ret;
4198	int abort_no_unlock = 0;
4199
4200	/*
4201	 * How big should this be, and should it be alloc'd? Lets try the
4202	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4203	 * until we get into jumbo grams and such..
4204	 */
4205	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4206	struct sctp_tcb *locked_tcb = stcb;
4207	int got_auth = 0;
4208	uint32_t auth_offset = 0, auth_len = 0;
4209	int auth_skipped = 0;
4210	int asconf_cnt = 0;
4211
4212#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4213	struct socket *so;
4214
4215#endif
4216
4217	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4218	    iphlen, *offset, length, stcb);
4219
4220	/* validate chunk header length... */
4221	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4222		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4223		    ntohs(ch->chunk_length));
4224		if (locked_tcb) {
4225			SCTP_TCB_UNLOCK(locked_tcb);
4226		}
4227		return (NULL);
4228	}
4229	/*
4230	 * validate the verification tag
4231	 */
4232	vtag_in = ntohl(sh->v_tag);
4233
4234	if (locked_tcb) {
4235		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4236	}
4237	if (ch->chunk_type == SCTP_INITIATION) {
4238		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4239		    ntohs(ch->chunk_length), vtag_in);
4240		if (vtag_in != 0) {
4241			/* protocol error- silently discard... */
4242			SCTP_STAT_INCR(sctps_badvtag);
4243			if (locked_tcb) {
4244				SCTP_TCB_UNLOCK(locked_tcb);
4245			}
4246			return (NULL);
4247		}
4248	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4249		/*
4250		 * If there is no stcb, skip the AUTH chunk and process
4251		 * later after a stcb is found (to validate the lookup was
4252		 * valid.
4253		 */
4254		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4255		    (stcb == NULL) &&
4256		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4257			/* save this chunk for later processing */
4258			auth_skipped = 1;
4259			auth_offset = *offset;
4260			auth_len = ntohs(ch->chunk_length);
4261
4262			/* (temporarily) move past this chunk */
4263			*offset += SCTP_SIZE32(auth_len);
4264			if (*offset >= length) {
4265				/* no more data left in the mbuf chain */
4266				*offset = length;
4267				if (locked_tcb) {
4268					SCTP_TCB_UNLOCK(locked_tcb);
4269				}
4270				return (NULL);
4271			}
4272			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4273			    sizeof(struct sctp_chunkhdr), chunk_buf);
4274		}
4275		if (ch == NULL) {
4276			/* Help */
4277			*offset = length;
4278			if (locked_tcb) {
4279				SCTP_TCB_UNLOCK(locked_tcb);
4280			}
4281			return (NULL);
4282		}
4283		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4284			goto process_control_chunks;
4285		}
4286		/*
4287		 * first check if it's an ASCONF with an unknown src addr we
4288		 * need to look inside to find the association
4289		 */
4290		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4291			struct sctp_chunkhdr *asconf_ch = ch;
4292			uint32_t asconf_offset = 0, asconf_len = 0;
4293
4294			/* inp's refcount may be reduced */
4295			SCTP_INP_INCR_REF(inp);
4296
4297			asconf_offset = *offset;
4298			do {
4299				asconf_len = ntohs(asconf_ch->chunk_length);
4300				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4301					break;
4302				stcb = sctp_findassociation_ep_asconf(m, iphlen,
4303				    *offset, sh, &inp, netp, vrf_id);
4304				if (stcb != NULL)
4305					break;
4306				asconf_offset += SCTP_SIZE32(asconf_len);
4307				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4308				    sizeof(struct sctp_chunkhdr), chunk_buf);
4309			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4310			if (stcb == NULL) {
4311				/*
4312				 * reduce inp's refcount if not reduced in
4313				 * sctp_findassociation_ep_asconf().
4314				 */
4315				SCTP_INP_DECR_REF(inp);
4316			} else {
4317				locked_tcb = stcb;
4318			}
4319
4320			/* now go back and verify any auth chunk to be sure */
4321			if (auth_skipped && (stcb != NULL)) {
4322				struct sctp_auth_chunk *auth;
4323
4324				auth = (struct sctp_auth_chunk *)
4325				    sctp_m_getptr(m, auth_offset,
4326				    auth_len, chunk_buf);
4327				got_auth = 1;
4328				auth_skipped = 0;
4329				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4330				    auth_offset)) {
4331					/* auth HMAC failed so dump it */
4332					*offset = length;
4333					if (locked_tcb) {
4334						SCTP_TCB_UNLOCK(locked_tcb);
4335					}
4336					return (NULL);
4337				} else {
4338					/* remaining chunks are HMAC checked */
4339					stcb->asoc.authenticated = 1;
4340				}
4341			}
4342		}
4343		if (stcb == NULL) {
4344			/* no association, so it's out of the blue... */
4345			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
4346			    vrf_id, port);
4347			*offset = length;
4348			if (locked_tcb) {
4349				SCTP_TCB_UNLOCK(locked_tcb);
4350			}
4351			return (NULL);
4352		}
4353		asoc = &stcb->asoc;
4354		/* ABORT and SHUTDOWN can use either v_tag... */
4355		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4356		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4357		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4358			if ((vtag_in == asoc->my_vtag) ||
4359			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4360			    (vtag_in == asoc->peer_vtag))) {
4361				/* this is valid */
4362			} else {
4363				/* drop this packet... */
4364				SCTP_STAT_INCR(sctps_badvtag);
4365				if (locked_tcb) {
4366					SCTP_TCB_UNLOCK(locked_tcb);
4367				}
4368				return (NULL);
4369			}
4370		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4371			if (vtag_in != asoc->my_vtag) {
4372				/*
4373				 * this could be a stale SHUTDOWN-ACK or the
4374				 * peer never got the SHUTDOWN-COMPLETE and
4375				 * is still hung; we have started a new asoc
4376				 * but it won't complete until the shutdown
4377				 * is completed
4378				 */
4379				if (locked_tcb) {
4380					SCTP_TCB_UNLOCK(locked_tcb);
4381				}
4382				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
4383				    NULL, vrf_id, port);
4384				return (NULL);
4385			}
4386		} else {
4387			/* for all other chunks, vtag must match */
4388			if (vtag_in != asoc->my_vtag) {
4389				/* invalid vtag... */
4390				SCTPDBG(SCTP_DEBUG_INPUT3,
4391				    "invalid vtag: %xh, expect %xh\n",
4392				    vtag_in, asoc->my_vtag);
4393				SCTP_STAT_INCR(sctps_badvtag);
4394				if (locked_tcb) {
4395					SCTP_TCB_UNLOCK(locked_tcb);
4396				}
4397				*offset = length;
4398				return (NULL);
4399			}
4400		}
4401	}			/* end if !SCTP_COOKIE_ECHO */
4402	/*
4403	 * process all control chunks...
4404	 */
4405	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4406	/* EY */
4407	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4408	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4409	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4410		/* implied cookie-ack.. we must have lost the ack */
4411		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4412			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4413			    stcb->asoc.overall_error_count,
4414			    0,
4415			    SCTP_FROM_SCTP_INPUT,
4416			    __LINE__);
4417		}
4418		stcb->asoc.overall_error_count = 0;
4419		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4420		    *netp);
4421	}
4422process_control_chunks:
4423	while (IS_SCTP_CONTROL(ch)) {
4424		/* validate chunk length */
4425		chk_length = ntohs(ch->chunk_length);
4426		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4427		    ch->chunk_type, chk_length);
4428		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4429		if (chk_length < sizeof(*ch) ||
4430		    (*offset + (int)chk_length) > length) {
4431			*offset = length;
4432			if (locked_tcb) {
4433				SCTP_TCB_UNLOCK(locked_tcb);
4434			}
4435			return (NULL);
4436		}
4437		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4438		/*
4439		 * INIT-ACK only gets the init ack "header" portion only
4440		 * because we don't have to process the peer's COOKIE. All
4441		 * others get a complete chunk.
4442		 */
4443		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4444		    (ch->chunk_type == SCTP_INITIATION)) {
4445			/* get an init-ack chunk */
4446			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4447			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4448			if (ch == NULL) {
4449				*offset = length;
4450				if (locked_tcb) {
4451					SCTP_TCB_UNLOCK(locked_tcb);
4452				}
4453				return (NULL);
4454			}
4455		} else {
4456			/* For cookies and all other chunks. */
4457			if (chk_length > sizeof(chunk_buf)) {
4458				/*
4459				 * use just the size of the chunk buffer so
4460				 * the front part of our chunks fit in
4461				 * contiguous space up to the chunk buffer
4462				 * size (508 bytes). For chunks that need to
4463				 * get more than that they must use the
4464				 * sctp_m_getptr() function or other means
4465				 * (e.g. know how to parse mbuf chains).
4466				 * Cookies do this already.
4467				 */
4468				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4469				    (sizeof(chunk_buf) - 4),
4470				    chunk_buf);
4471				if (ch == NULL) {
4472					*offset = length;
4473					if (locked_tcb) {
4474						SCTP_TCB_UNLOCK(locked_tcb);
4475					}
4476					return (NULL);
4477				}
4478			} else {
4479				/* We can fit it all */
4480				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4481				    chk_length, chunk_buf);
4482				if (ch == NULL) {
4483					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4484					*offset = length;
4485					if (locked_tcb) {
4486						SCTP_TCB_UNLOCK(locked_tcb);
4487					}
4488					return (NULL);
4489				}
4490			}
4491		}
4492		num_chunks++;
4493		/* Save off the last place we got a control from */
4494		if (stcb != NULL) {
4495			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4496				/*
4497				 * allow last_control to be NULL if
4498				 * ASCONF... ASCONF processing will find the
4499				 * right net later
4500				 */
4501				if ((netp != NULL) && (*netp != NULL))
4502					stcb->asoc.last_control_chunk_from = *netp;
4503			}
4504		}
4505#ifdef SCTP_AUDITING_ENABLED
4506		sctp_audit_log(0xB0, ch->chunk_type);
4507#endif
4508
4509		/* check to see if this chunk required auth, but isn't */
4510		if ((stcb != NULL) &&
4511		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4512		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4513		    !stcb->asoc.authenticated) {
4514			/* "silently" ignore */
4515			SCTP_STAT_INCR(sctps_recvauthmissing);
4516			goto next_chunk;
4517		}
4518		switch (ch->chunk_type) {
4519		case SCTP_INITIATION:
4520			/* must be first and only chunk */
4521			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4522			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4523				/* We are not interested anymore? */
4524				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4525					/*
4526					 * collision case where we are
4527					 * sending to them too
4528					 */
4529					;
4530				} else {
4531					if (locked_tcb) {
4532						SCTP_TCB_UNLOCK(locked_tcb);
4533					}
4534					*offset = length;
4535					return (NULL);
4536				}
4537			}
4538			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4539			    (num_chunks > 1) ||
4540			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4541				*offset = length;
4542				if (locked_tcb) {
4543					SCTP_TCB_UNLOCK(locked_tcb);
4544				}
4545				return (NULL);
4546			}
4547			if ((stcb != NULL) &&
4548			    (SCTP_GET_STATE(&stcb->asoc) ==
4549			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4550				sctp_send_shutdown_ack(stcb,
4551				    stcb->asoc.primary_destination);
4552				*offset = length;
4553				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4554				if (locked_tcb) {
4555					SCTP_TCB_UNLOCK(locked_tcb);
4556				}
4557				return (NULL);
4558			}
4559			if (netp) {
4560				sctp_handle_init(m, iphlen, *offset, sh,
4561				    (struct sctp_init_chunk *)ch, inp,
4562				    stcb, *netp, &abort_no_unlock, vrf_id, port);
4563			}
4564			if (abort_no_unlock)
4565				return (NULL);
4566
4567			*offset = length;
4568			if (locked_tcb) {
4569				SCTP_TCB_UNLOCK(locked_tcb);
4570			}
4571			return (NULL);
4572			break;
4573		case SCTP_PAD_CHUNK:
4574			break;
4575		case SCTP_INITIATION_ACK:
4576			/* must be first and only chunk */
4577			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4578			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4579				/* We are not interested anymore */
4580				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4581					;
4582				} else {
4583					if (locked_tcb != stcb) {
4584						/* Very unlikely */
4585						SCTP_TCB_UNLOCK(locked_tcb);
4586					}
4587					*offset = length;
4588					if (stcb) {
4589#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4590						so = SCTP_INP_SO(inp);
4591						atomic_add_int(&stcb->asoc.refcnt, 1);
4592						SCTP_TCB_UNLOCK(stcb);
4593						SCTP_SOCKET_LOCK(so, 1);
4594						SCTP_TCB_LOCK(stcb);
4595						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4596#endif
4597						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4598#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4599						SCTP_SOCKET_UNLOCK(so, 1);
4600#endif
4601					}
4602					return (NULL);
4603				}
4604			}
4605			if ((num_chunks > 1) ||
4606			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4607				*offset = length;
4608				if (locked_tcb) {
4609					SCTP_TCB_UNLOCK(locked_tcb);
4610				}
4611				return (NULL);
4612			}
4613			if ((netp) && (*netp)) {
4614				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4615				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4616			} else {
4617				ret = -1;
4618			}
4619			/*
4620			 * Special case, I must call the output routine to
4621			 * get the cookie echoed
4622			 */
4623			if (abort_no_unlock)
4624				return (NULL);
4625
4626			if ((stcb) && ret == 0)
4627				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4628			*offset = length;
4629			if (locked_tcb) {
4630				SCTP_TCB_UNLOCK(locked_tcb);
4631			}
4632			return (NULL);
4633			break;
4634		case SCTP_SELECTIVE_ACK:
4635			{
4636				struct sctp_sack_chunk *sack;
4637				int abort_now = 0;
4638				uint32_t a_rwnd, cum_ack;
4639				uint16_t num_seg, num_dup;
4640				uint8_t flags;
4641				int offset_seg, offset_dup;
4642
4643				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4644				SCTP_STAT_INCR(sctps_recvsacks);
4645				if (stcb == NULL) {
4646					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
4647					break;
4648				}
4649				if (chk_length < sizeof(struct sctp_sack_chunk)) {
4650					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4651					break;
4652				}
4653				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4654					/*-
4655					 * If we have sent a shutdown-ack, we will pay no
4656					 * attention to a sack sent in to us since
4657					 * we don't care anymore.
4658					 */
4659					break;
4660				}
4661				sack = (struct sctp_sack_chunk *)ch;
4662				flags = ch->chunk_flags;
4663				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4664				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4665				num_dup = ntohs(sack->sack.num_dup_tsns);
4666				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4667				if (sizeof(struct sctp_sack_chunk) +
4668				    num_seg * sizeof(struct sctp_gap_ack_block) +
4669				    num_dup * sizeof(uint32_t) != chk_length) {
4670					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4671					break;
4672				}
4673				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4674				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4675				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4676				    cum_ack, num_seg, a_rwnd);
4677				stcb->asoc.seen_a_sack_this_pkt = 1;
4678				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4679				    (num_seg == 0) &&
4680				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4681				    (stcb->asoc.saw_sack_with_frags == 0) &&
4682				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4683				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4684				    ) {
4685					/*
4686					 * We have a SIMPLE sack having no
4687					 * prior segments and data on sent
4688					 * queue to be acked.. Use the
4689					 * faster path sack processing. We
4690					 * also allow window update sacks
4691					 * with no missing segments to go
4692					 * this way too.
4693					 */
4694					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now);
4695				} else {
4696					if (netp && *netp)
4697						sctp_handle_sack(m, offset_seg, offset_dup,
4698						    stcb, *netp,
4699						    num_seg, 0, num_dup, &abort_now, flags,
4700						    cum_ack, a_rwnd);
4701				}
4702				if (abort_now) {
4703					/* ABORT signal from sack processing */
4704					*offset = length;
4705					return (NULL);
4706				}
4707				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4708				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4709				    (stcb->asoc.stream_queue_cnt == 0)) {
4710					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4711				}
4712			}
4713			break;
4714			/*
4715			 * EY - nr_sack:  If the received chunk is an
4716			 * nr_sack chunk
4717			 */
4718		case SCTP_NR_SELECTIVE_ACK:
4719			{
4720				struct sctp_nr_sack_chunk *nr_sack;
4721				int abort_now = 0;
4722				uint32_t a_rwnd, cum_ack;
4723				uint16_t num_seg, num_nr_seg, num_dup;
4724				uint8_t flags;
4725				int offset_seg, offset_dup;
4726
4727				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4728				SCTP_STAT_INCR(sctps_recvsacks);
4729				if (stcb == NULL) {
4730					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
4731					break;
4732				}
4733				if ((stcb->asoc.sctp_nr_sack_on_off == 0) ||
4734				    (stcb->asoc.peer_supports_nr_sack == 0)) {
4735					goto unknown_chunk;
4736				}
4737				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4738					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
4739					break;
4740				}
4741				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4742					/*-
4743					 * If we have sent a shutdown-ack, we will pay no
4744					 * attention to a sack sent in to us since
4745					 * we don't care anymore.
4746					 */
4747					break;
4748				}
4749				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4750				flags = ch->chunk_flags;
4751				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4752				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4753				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4754				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4755				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4756				if (sizeof(struct sctp_nr_sack_chunk) +
4757				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
4758				    num_dup * sizeof(uint32_t) != chk_length) {
4759					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
4760					break;
4761				}
4762				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
4763				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4764				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4765				    cum_ack, num_seg, a_rwnd);
4766				stcb->asoc.seen_a_sack_this_pkt = 1;
4767				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4768				    (num_seg == 0) && (num_nr_seg == 0) &&
4769				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4770				    (stcb->asoc.saw_sack_with_frags == 0) &&
4771				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4772				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
4773					/*
4774					 * We have a SIMPLE sack having no
4775					 * prior segments and data on sent
4776					 * queue to be acked. Use the faster
4777					 * path sack processing. We also
4778					 * allow window update sacks with no
4779					 * missing segments to go this way
4780					 * too.
4781					 */
4782					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
4783					    &abort_now);
4784				} else {
4785					if (netp && *netp)
4786						sctp_handle_sack(m, offset_seg, offset_dup,
4787						    stcb, *netp,
4788						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
4789						    cum_ack, a_rwnd);
4790				}
4791				if (abort_now) {
4792					/* ABORT signal from sack processing */
4793					*offset = length;
4794					return (NULL);
4795				}
4796				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4797				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4798				    (stcb->asoc.stream_queue_cnt == 0)) {
4799					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4800				}
4801			}
4802			break;
4803
4804		case SCTP_HEARTBEAT_REQUEST:
4805			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4806			if ((stcb) && netp && *netp) {
4807				SCTP_STAT_INCR(sctps_recvheartbeat);
4808				sctp_send_heartbeat_ack(stcb, m, *offset,
4809				    chk_length, *netp);
4810
4811				/* He's alive so give him credit */
4812				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4813					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4814					    stcb->asoc.overall_error_count,
4815					    0,
4816					    SCTP_FROM_SCTP_INPUT,
4817					    __LINE__);
4818				}
4819				stcb->asoc.overall_error_count = 0;
4820			}
4821			break;
4822		case SCTP_HEARTBEAT_ACK:
4823			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4824			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4825				/* Its not ours */
4826				*offset = length;
4827				if (locked_tcb) {
4828					SCTP_TCB_UNLOCK(locked_tcb);
4829				}
4830				return (NULL);
4831			}
4832			/* He's alive so give him credit */
4833			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4834				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4835				    stcb->asoc.overall_error_count,
4836				    0,
4837				    SCTP_FROM_SCTP_INPUT,
4838				    __LINE__);
4839			}
4840			stcb->asoc.overall_error_count = 0;
4841			SCTP_STAT_INCR(sctps_recvheartbeatack);
4842			if (netp && *netp)
4843				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4844				    stcb, *netp);
4845			break;
4846		case SCTP_ABORT_ASSOCIATION:
4847			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4848			    stcb);
4849			if ((stcb) && netp && *netp)
4850				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4851				    stcb, *netp);
4852			*offset = length;
4853			return (NULL);
4854			break;
4855		case SCTP_SHUTDOWN:
4856			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4857			    stcb);
4858			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4859				*offset = length;
4860				if (locked_tcb) {
4861					SCTP_TCB_UNLOCK(locked_tcb);
4862				}
4863				return (NULL);
4864			}
4865			if (netp && *netp) {
4866				int abort_flag = 0;
4867
4868				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4869				    stcb, *netp, &abort_flag);
4870				if (abort_flag) {
4871					*offset = length;
4872					return (NULL);
4873				}
4874			}
4875			break;
4876		case SCTP_SHUTDOWN_ACK:
4877			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4878			if ((stcb) && (netp) && (*netp))
4879				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4880			*offset = length;
4881			return (NULL);
4882			break;
4883
4884		case SCTP_OPERATION_ERROR:
4885			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4886			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4887
4888				*offset = length;
4889				return (NULL);
4890			}
4891			break;
4892		case SCTP_COOKIE_ECHO:
4893			SCTPDBG(SCTP_DEBUG_INPUT3,
4894			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4895			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4896				;
4897			} else {
4898				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4899					/* We are not interested anymore */
4900			abend:
4901					if (stcb) {
4902						SCTP_TCB_UNLOCK(stcb);
4903					}
4904					*offset = length;
4905					return (NULL);
4906				}
4907			}
4908			/*
4909			 * First are we accepting? We do this again here
4910			 * since it is possible that a previous endpoint WAS
4911			 * listening responded to a INIT-ACK and then
4912			 * closed. We opened and bound.. and are now no
4913			 * longer listening.
4914			 */
4915
4916			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
4917				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4918				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4919					struct mbuf *oper;
4920					struct sctp_paramhdr *phdr;
4921
4922					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4923					    0, M_DONTWAIT, 1, MT_DATA);
4924					if (oper) {
4925						SCTP_BUF_LEN(oper) =
4926						    sizeof(struct sctp_paramhdr);
4927						phdr = mtod(oper,
4928						    struct sctp_paramhdr *);
4929						phdr->param_type =
4930						    htons(SCTP_CAUSE_OUT_OF_RESC);
4931						phdr->param_length =
4932						    htons(sizeof(struct sctp_paramhdr));
4933					}
4934					sctp_abort_association(inp, stcb, m,
4935					    iphlen, sh, oper, vrf_id, port);
4936				}
4937				*offset = length;
4938				return (NULL);
4939			} else {
4940				struct mbuf *ret_buf;
4941				struct sctp_inpcb *linp;
4942
4943				if (stcb) {
4944					linp = NULL;
4945				} else {
4946					linp = inp;
4947				}
4948
4949				if (linp) {
4950					SCTP_ASOC_CREATE_LOCK(linp);
4951					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4952					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4953						SCTP_ASOC_CREATE_UNLOCK(linp);
4954						goto abend;
4955					}
4956				}
4957				if (netp) {
4958					ret_buf =
4959					    sctp_handle_cookie_echo(m, iphlen,
4960					    *offset, sh,
4961					    (struct sctp_cookie_echo_chunk *)ch,
4962					    &inp, &stcb, netp,
4963					    auth_skipped,
4964					    auth_offset,
4965					    auth_len,
4966					    &locked_tcb,
4967					    vrf_id,
4968					    port);
4969				} else {
4970					ret_buf = NULL;
4971				}
4972				if (linp) {
4973					SCTP_ASOC_CREATE_UNLOCK(linp);
4974				}
4975				if (ret_buf == NULL) {
4976					if (locked_tcb) {
4977						SCTP_TCB_UNLOCK(locked_tcb);
4978					}
4979					SCTPDBG(SCTP_DEBUG_INPUT3,
4980					    "GAK, null buffer\n");
4981					auth_skipped = 0;
4982					*offset = length;
4983					return (NULL);
4984				}
4985				/* if AUTH skipped, see if it verified... */
4986				if (auth_skipped) {
4987					got_auth = 1;
4988					auth_skipped = 0;
4989				}
4990				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4991					/*
4992					 * Restart the timer if we have
4993					 * pending data
4994					 */
4995					struct sctp_tmit_chunk *chk;
4996
4997					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4998					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
4999				}
5000			}
5001			break;
5002		case SCTP_COOKIE_ACK:
5003			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
5004			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5005				if (locked_tcb) {
5006					SCTP_TCB_UNLOCK(locked_tcb);
5007				}
5008				return (NULL);
5009			}
5010			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5011				/* We are not interested anymore */
5012				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5013					;
5014				} else if (stcb) {
5015#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5016					so = SCTP_INP_SO(inp);
5017					atomic_add_int(&stcb->asoc.refcnt, 1);
5018					SCTP_TCB_UNLOCK(stcb);
5019					SCTP_SOCKET_LOCK(so, 1);
5020					SCTP_TCB_LOCK(stcb);
5021					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5022#endif
5023					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
5024#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5025					SCTP_SOCKET_UNLOCK(so, 1);
5026#endif
5027					*offset = length;
5028					return (NULL);
5029				}
5030			}
5031			/* He's alive so give him credit */
5032			if ((stcb) && netp && *netp) {
5033				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5034					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5035					    stcb->asoc.overall_error_count,
5036					    0,
5037					    SCTP_FROM_SCTP_INPUT,
5038					    __LINE__);
5039				}
5040				stcb->asoc.overall_error_count = 0;
5041				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5042			}
5043			break;
5044		case SCTP_ECN_ECHO:
5045			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
5046			/* He's alive so give him credit */
5047			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5048				/* Its not ours */
5049				if (locked_tcb) {
5050					SCTP_TCB_UNLOCK(locked_tcb);
5051				}
5052				*offset = length;
5053				return (NULL);
5054			}
5055			if (stcb) {
5056				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5057					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5058					    stcb->asoc.overall_error_count,
5059					    0,
5060					    SCTP_FROM_SCTP_INPUT,
5061					    __LINE__);
5062				}
5063				stcb->asoc.overall_error_count = 0;
5064				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5065				    stcb);
5066			}
5067			break;
5068		case SCTP_ECN_CWR:
5069			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5070			/* He's alive so give him credit */
5071			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5072				/* Its not ours */
5073				if (locked_tcb) {
5074					SCTP_TCB_UNLOCK(locked_tcb);
5075				}
5076				*offset = length;
5077				return (NULL);
5078			}
5079			if (stcb) {
5080				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5081					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5082					    stcb->asoc.overall_error_count,
5083					    0,
5084					    SCTP_FROM_SCTP_INPUT,
5085					    __LINE__);
5086				}
5087				stcb->asoc.overall_error_count = 0;
5088				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5089			}
5090			break;
5091		case SCTP_SHUTDOWN_COMPLETE:
5092			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
5093			/* must be first and only chunk */
5094			if ((num_chunks > 1) ||
5095			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5096				*offset = length;
5097				if (locked_tcb) {
5098					SCTP_TCB_UNLOCK(locked_tcb);
5099				}
5100				return (NULL);
5101			}
5102			if ((stcb) && netp && *netp) {
5103				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5104				    stcb, *netp);
5105			}
5106			*offset = length;
5107			return (NULL);
5108			break;
5109		case SCTP_ASCONF:
5110			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5111			/* He's alive so give him credit */
5112			if (stcb) {
5113				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5114					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5115					    stcb->asoc.overall_error_count,
5116					    0,
5117					    SCTP_FROM_SCTP_INPUT,
5118					    __LINE__);
5119				}
5120				stcb->asoc.overall_error_count = 0;
5121				sctp_handle_asconf(m, *offset,
5122				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5123				asconf_cnt++;
5124			}
5125			break;
5126		case SCTP_ASCONF_ACK:
5127			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5128			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5129				/* Its not ours */
5130				if (locked_tcb) {
5131					SCTP_TCB_UNLOCK(locked_tcb);
5132				}
5133				*offset = length;
5134				return (NULL);
5135			}
5136			if ((stcb) && netp && *netp) {
5137				/* He's alive so give him credit */
5138				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5139					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5140					    stcb->asoc.overall_error_count,
5141					    0,
5142					    SCTP_FROM_SCTP_INPUT,
5143					    __LINE__);
5144				}
5145				stcb->asoc.overall_error_count = 0;
5146				sctp_handle_asconf_ack(m, *offset,
5147				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5148				if (abort_no_unlock)
5149					return (NULL);
5150			}
5151			break;
5152		case SCTP_FORWARD_CUM_TSN:
5153			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5154			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5155				/* Its not ours */
5156				if (locked_tcb) {
5157					SCTP_TCB_UNLOCK(locked_tcb);
5158				}
5159				*offset = length;
5160				return (NULL);
5161			}
5162			/* He's alive so give him credit */
5163			if (stcb) {
5164				int abort_flag = 0;
5165
5166				stcb->asoc.overall_error_count = 0;
5167				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5168					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5169					    stcb->asoc.overall_error_count,
5170					    0,
5171					    SCTP_FROM_SCTP_INPUT,
5172					    __LINE__);
5173				}
5174				*fwd_tsn_seen = 1;
5175				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5176					/* We are not interested anymore */
5177#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5178					so = SCTP_INP_SO(inp);
5179					atomic_add_int(&stcb->asoc.refcnt, 1);
5180					SCTP_TCB_UNLOCK(stcb);
5181					SCTP_SOCKET_LOCK(so, 1);
5182					SCTP_TCB_LOCK(stcb);
5183					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5184#endif
5185					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5186#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5187					SCTP_SOCKET_UNLOCK(so, 1);
5188#endif
5189					*offset = length;
5190					return (NULL);
5191				}
5192				sctp_handle_forward_tsn(stcb,
5193				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5194				if (abort_flag) {
5195					*offset = length;
5196					return (NULL);
5197				} else {
5198					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5199						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5200						    stcb->asoc.overall_error_count,
5201						    0,
5202						    SCTP_FROM_SCTP_INPUT,
5203						    __LINE__);
5204					}
5205					stcb->asoc.overall_error_count = 0;
5206				}
5207
5208			}
5209			break;
5210		case SCTP_STREAM_RESET:
5211			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5212			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5213				/* Its not ours */
5214				if (locked_tcb) {
5215					SCTP_TCB_UNLOCK(locked_tcb);
5216				}
5217				*offset = length;
5218				return (NULL);
5219			}
5220			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5221				/* We are not interested anymore */
5222#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5223				so = SCTP_INP_SO(inp);
5224				atomic_add_int(&stcb->asoc.refcnt, 1);
5225				SCTP_TCB_UNLOCK(stcb);
5226				SCTP_SOCKET_LOCK(so, 1);
5227				SCTP_TCB_LOCK(stcb);
5228				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5229#endif
5230				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5231#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5232				SCTP_SOCKET_UNLOCK(so, 1);
5233#endif
5234				*offset = length;
5235				return (NULL);
5236			}
5237			if (stcb->asoc.peer_supports_strreset == 0) {
5238				/*
5239				 * hmm, peer should have announced this, but
5240				 * we will turn it on since he is sending us
5241				 * a stream reset.
5242				 */
5243				stcb->asoc.peer_supports_strreset = 1;
5244			}
5245			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
5246				/* stop processing */
5247				*offset = length;
5248				return (NULL);
5249			}
5250			break;
5251		case SCTP_PACKET_DROPPED:
5252			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5253			/* re-get it all please */
5254			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5255				/* Its not ours */
5256				if (locked_tcb) {
5257					SCTP_TCB_UNLOCK(locked_tcb);
5258				}
5259				*offset = length;
5260				return (NULL);
5261			}
5262			if (ch && (stcb) && netp && (*netp)) {
5263				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5264				    stcb, *netp,
5265				    min(chk_length, (sizeof(chunk_buf) - 4)));
5266
5267			}
5268			break;
5269
5270		case SCTP_AUTHENTICATION:
5271			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5272			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5273				goto unknown_chunk;
5274
5275			if (stcb == NULL) {
5276				/* save the first AUTH for later processing */
5277				if (auth_skipped == 0) {
5278					auth_offset = *offset;
5279					auth_len = chk_length;
5280					auth_skipped = 1;
5281				}
5282				/* skip this chunk (temporarily) */
5283				goto next_chunk;
5284			}
5285			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5286			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5287			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5288				/* Its not ours */
5289				if (locked_tcb) {
5290					SCTP_TCB_UNLOCK(locked_tcb);
5291				}
5292				*offset = length;
5293				return (NULL);
5294			}
5295			if (got_auth == 1) {
5296				/* skip this chunk... it's already auth'd */
5297				goto next_chunk;
5298			}
5299			got_auth = 1;
5300			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5301			    m, *offset)) {
5302				/* auth HMAC failed so dump the packet */
5303				*offset = length;
5304				return (stcb);
5305			} else {
5306				/* remaining chunks are HMAC checked */
5307				stcb->asoc.authenticated = 1;
5308			}
5309			break;
5310
5311		default:
5312	unknown_chunk:
5313			/* it's an unknown chunk! */
5314			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5315				struct mbuf *mm;
5316				struct sctp_paramhdr *phd;
5317
5318				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5319				    0, M_DONTWAIT, 1, MT_DATA);
5320				if (mm) {
5321					phd = mtod(mm, struct sctp_paramhdr *);
5322					/*
5323					 * We cheat and use param type since
5324					 * we did not bother to define a
5325					 * error cause struct. They are the
5326					 * same basic format with different
5327					 * names.
5328					 */
5329					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5330					phd->param_length = htons(chk_length + sizeof(*phd));
5331					SCTP_BUF_LEN(mm) = sizeof(*phd);
5332					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
5333					    M_DONTWAIT);
5334					if (SCTP_BUF_NEXT(mm)) {
5335#ifdef SCTP_MBUF_LOGGING
5336						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5337							struct mbuf *mat;
5338
5339							mat = SCTP_BUF_NEXT(mm);
5340							while (mat) {
5341								if (SCTP_BUF_IS_EXTENDED(mat)) {
5342									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5343								}
5344								mat = SCTP_BUF_NEXT(mat);
5345							}
5346						}
5347#endif
5348						sctp_queue_op_err(stcb, mm);
5349					} else {
5350						sctp_m_freem(mm);
5351					}
5352				}
5353			}
5354			if ((ch->chunk_type & 0x80) == 0) {
5355				/* discard this packet */
5356				*offset = length;
5357				return (stcb);
5358			}	/* else skip this bad chunk and continue... */
5359			break;
5360		}		/* switch (ch->chunk_type) */
5361
5362
5363next_chunk:
5364		/* get the next chunk */
5365		*offset += SCTP_SIZE32(chk_length);
5366		if (*offset >= length) {
5367			/* no more data left in the mbuf chain */
5368			break;
5369		}
5370		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5371		    sizeof(struct sctp_chunkhdr), chunk_buf);
5372		if (ch == NULL) {
5373			if (locked_tcb) {
5374				SCTP_TCB_UNLOCK(locked_tcb);
5375			}
5376			*offset = length;
5377			return (NULL);
5378		}
5379	}			/* while */
5380
5381	if (asconf_cnt > 0 && stcb != NULL) {
5382		sctp_send_asconf_ack(stcb);
5383	}
5384	return (stcb);
5385}
5386
5387
#ifdef INVARIANTS
#ifdef __GNUC__
__attribute__((noinline))
#endif
void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *tcb;

	/*
	 * Debug-only sanity check: on return from input processing no
	 * stcb lock, nor the endpoint's create/inp locks, may still be
	 * owned by this thread.  Panic loudly if any is.
	 */
	for (tcb = LIST_FIRST(&inp->sctp_asoc_list); tcb != NULL;
	    tcb = LIST_NEXT(tcb, sctp_tcblist)) {
		if (mtx_owned(&tcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
	if (mtx_owned(&inp->inp_create_mtx)) {
		panic("Own create lock on inp");
	}
	if (mtx_owned(&inp->inp_mtx)) {
		panic("Own inp lock on inp");
	}
}

#endif
5411
5412/*
5413 * common input chunk processing (v4 and v6)
5414 */
5415void
5416sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
5417    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
5418    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
5419    uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
5420{
5421	/*
5422	 * Control chunk processing
5423	 */
5424	uint32_t high_tsn;
5425	int fwd_tsn_seen = 0, data_processed = 0;
5426	struct mbuf *m = *mm;
5427	int abort_flag = 0;
5428	int un_sent;
5429	int cnt_ctrl_ready = 0;
5430
5431	SCTP_STAT_INCR(sctps_recvdatagrams);
5432#ifdef SCTP_AUDITING_ENABLED
5433	sctp_audit_log(0xE0, 1);
5434	sctp_auditing(0, inp, stcb, net);
5435#endif
5436
5437	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5438	    m, iphlen, offset, length, stcb);
5439	if (stcb) {
5440		/* always clear this before beginning a packet */
5441		stcb->asoc.authenticated = 0;
5442		stcb->asoc.seen_a_sack_this_pkt = 0;
5443		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5444		    stcb, stcb->asoc.state);
5445
5446		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5447		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5448			/*-
5449			 * If we hit here, we had a ref count
5450			 * up when the assoc was aborted and the
5451			 * timer is clearing out the assoc, we should
5452			 * NOT respond to any packet.. its OOTB.
5453			 */
5454			SCTP_TCB_UNLOCK(stcb);
5455			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5456			    vrf_id, port);
5457			goto out_now;
5458		}
5459	}
5460	if (IS_SCTP_CONTROL(ch)) {
5461		/* process the control portion of the SCTP packet */
5462		/* sa_ignore NO_NULL_CHK */
5463		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
5464		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
5465		if (stcb) {
5466			/*
5467			 * This covers us if the cookie-echo was there and
5468			 * it changes our INP.
5469			 */
5470			inp = stcb->sctp_ep;
5471			if ((net) && (port)) {
5472				if (net->port == 0) {
5473					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5474				}
5475				net->port = port;
5476			}
5477		}
5478	} else {
5479		/*
5480		 * no control chunks, so pre-process DATA chunks (these
5481		 * checks are taken care of by control processing)
5482		 */
5483
5484		/*
5485		 * if DATA only packet, and auth is required, then punt...
5486		 * can't have authenticated without any AUTH (control)
5487		 * chunks
5488		 */
5489		if ((stcb != NULL) &&
5490		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5491		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5492			/* "silently" ignore */
5493			SCTP_STAT_INCR(sctps_recvauthmissing);
5494			SCTP_TCB_UNLOCK(stcb);
5495			goto out_now;
5496		}
5497		if (stcb == NULL) {
5498			/* out of the blue DATA chunk */
5499			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5500			    vrf_id, port);
5501			goto out_now;
5502		}
5503		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5504			/* v_tag mismatch! */
5505			SCTP_STAT_INCR(sctps_badvtag);
5506			SCTP_TCB_UNLOCK(stcb);
5507			goto out_now;
5508		}
5509	}
5510
5511	if (stcb == NULL) {
5512		/*
5513		 * no valid TCB for this packet, or we found it's a bad
5514		 * packet while processing control, or we're done with this
5515		 * packet (done or skip rest of data), so we drop it...
5516		 */
5517		goto out_now;
5518	}
5519	/*
5520	 * DATA chunk processing
5521	 */
5522	/* plow through the data chunks while length > offset */
5523
5524	/*
5525	 * Rest should be DATA only.  Check authentication state if AUTH for
5526	 * DATA is required.
5527	 */
5528	if ((length > offset) &&
5529	    (stcb != NULL) &&
5530	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5531	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5532	    !stcb->asoc.authenticated) {
5533		/* "silently" ignore */
5534		SCTP_STAT_INCR(sctps_recvauthmissing);
5535		SCTPDBG(SCTP_DEBUG_AUTH1,
5536		    "Data chunk requires AUTH, skipped\n");
5537		goto trigger_send;
5538	}
5539	if (length > offset) {
5540		int retval;
5541
5542		/*
5543		 * First check to make sure our state is correct. We would
5544		 * not get here unless we really did have a tag, so we don't
5545		 * abort if this happens, just dump the chunk silently.
5546		 */
5547		switch (SCTP_GET_STATE(&stcb->asoc)) {
5548		case SCTP_STATE_COOKIE_ECHOED:
5549			/*
5550			 * we consider data with valid tags in this state
5551			 * shows us the cookie-ack was lost. Imply it was
5552			 * there.
5553			 */
5554			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5555				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5556				    stcb->asoc.overall_error_count,
5557				    0,
5558				    SCTP_FROM_SCTP_INPUT,
5559				    __LINE__);
5560			}
5561			stcb->asoc.overall_error_count = 0;
5562			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5563			break;
5564		case SCTP_STATE_COOKIE_WAIT:
5565			/*
5566			 * We consider OOTB any data sent during asoc setup.
5567			 */
5568			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5569			    vrf_id, port);
5570			SCTP_TCB_UNLOCK(stcb);
5571			goto out_now;
5572			/* sa_ignore NOTREACHED */
5573			break;
5574		case SCTP_STATE_EMPTY:	/* should not happen */
5575		case SCTP_STATE_INUSE:	/* should not happen */
5576		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5577		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5578		default:
5579			SCTP_TCB_UNLOCK(stcb);
5580			goto out_now;
5581			/* sa_ignore NOTREACHED */
5582			break;
5583		case SCTP_STATE_OPEN:
5584		case SCTP_STATE_SHUTDOWN_SENT:
5585			break;
5586		}
5587		/* plow through the data chunks while length > offset */
5588		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
5589		    inp, stcb, net, &high_tsn);
5590		if (retval == 2) {
5591			/*
5592			 * The association aborted, NO UNLOCK needed since
5593			 * the association is destroyed.
5594			 */
5595			goto out_now;
5596		}
5597		data_processed = 1;
5598		/*
5599		 * Anything important needs to have been m_copy'ed in
5600		 * process_data
5601		 */
5602	}
5603	/* take care of ecn */
5604	if (stcb->asoc.ecn_allowed && ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
5605		/* Yep, we need to add a ECNE */
5606		sctp_send_ecn_echo(stcb, net, high_tsn);
5607	}
5608	if ((data_processed == 0) && (fwd_tsn_seen)) {
5609		int was_a_gap;
5610		uint32_t highest_tsn;
5611
5612		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
5613			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
5614		} else {
5615			highest_tsn = stcb->asoc.highest_tsn_inside_map;
5616		}
5617		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
5618		stcb->asoc.send_sack = 1;
5619		sctp_sack_check(stcb, was_a_gap, &abort_flag);
5620		if (abort_flag) {
5621			/* Again, we aborted so NO UNLOCK needed */
5622			goto out_now;
5623		}
5624	} else if (fwd_tsn_seen) {
5625		stcb->asoc.send_sack = 1;
5626	}
5627	/* trigger send of any chunks in queue... */
5628trigger_send:
5629#ifdef SCTP_AUDITING_ENABLED
5630	sctp_audit_log(0xE0, 2);
5631	sctp_auditing(1, inp, stcb, net);
5632#endif
5633	SCTPDBG(SCTP_DEBUG_INPUT1,
5634	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5635	    stcb->asoc.peers_rwnd,
5636	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5637	    stcb->asoc.total_flight);
5638	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5639	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
5640		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
5641	}
5642	if (cnt_ctrl_ready ||
5643	    ((un_sent) &&
5644	    (stcb->asoc.peers_rwnd > 0 ||
5645	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5646		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5647		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5648		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5649	}
5650#ifdef SCTP_AUDITING_ENABLED
5651	sctp_audit_log(0xE0, 3);
5652	sctp_auditing(2, inp, stcb, net);
5653#endif
5654	SCTP_TCB_UNLOCK(stcb);
5655out_now:
5656#ifdef INVARIANTS
5657	sctp_validate_no_locks(inp);
5658#endif
5659	return;
5660}
5661
#if 0
/*
 * Compiled-out debug helper: dump the length (and external-storage size,
 * if any) of every mbuf in a chain.
 *
 * NOTE(review): the "%ld" specifier assumes SCTP_BUF_LEN() yields a long;
 * confirm it matches the mbuf length type before re-enabling this code.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	for (; m; m = SCTP_BUF_NEXT(m)) {
		printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m));
		if (SCTP_BUF_IS_EXTENDED(m))
			printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m));
	}
}

#endif
5674
/*
 * IPv4 SCTP input path.  Validates the packet (header lengths, CRC32c
 * checksum, no broadcast/multicast, non-zero destination port), locates
 * the endpoint/association, and hands the packet to
 * sctp_common_input_processing().  A non-zero `port' marks a
 * UDP-encapsulated packet; it is recorded in net->port and the path MTU
 * is reduced by the UDP header size on first sight.
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	/* set when we hold an extra inp ref that must be dropped on exit */
	int refcount_up = 0;
	int length, mlen, offset;

#if !defined(SCTP_WITH_NO_CSUM)
	uint32_t check, calc_check;

#endif

	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef  SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		/* headers span mbufs; pull them into one contiguous mbuf */
		if ((m = m_pullup(m, offset)) == 0) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		/* m may have been replaced by m_pullup(); refetch ip */
		ip = mtod(m, struct ip *);
	}
	/* validate mbuf chain length with IP payload length */
	if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    m->m_pkthdr.csum_flags);
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		/* hardware already verified the checksum; skip software calc */
		SCTP_STAT_INCR(sctps_recvhwcrc);
		goto sctp_skip_csum_4;
	}
	check = sh->checksum;	/* save incoming checksum */
	sh->checksum = 0;	/* prepare for calc */
	calc_check = sctp_calculate_cksum(m, iphlen);
	sh->checksum = check;
	SCTP_STAT_INCR(sctps_recvswcrc);
	if (calc_check != check) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
		    calc_check, check, m, mlen, iphlen);

		/*
		 * Even on a bad checksum we look the association up so a
		 * PACKET-DROPPED report can be sent to the peer.
		 */
		stcb = sctp_findassociation_addr(m, iphlen,
		    offset - sizeof(*ch),
		    sh, ch, &inp, &net,
		    vrf_id);
		if ((net) && (port)) {
			if (net->port == 0) {
				sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
			}
			net->port = port;
		}
		if ((inp) && (stcb)) {
			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
		} else if ((inp != NULL) && (stcb == NULL)) {
			/* lookup raised the inp refcount; remember to drop it */
			refcount_up = 1;
		}
		SCTP_STAT_INCR(sctps_badsum);
		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
		goto bad;
	}
sctp_skip_csum_4:
#endif
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	if ((net) && (port)) {
		/* UDP-encapsulated: adjust MTU once, then record the port */
		if (net->port == 0) {
			sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
		}
		net->port = port;
	}
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		/* no endpoint listens on this port: respond per chunk type */
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
		goto bad;
	} else if (stcb == NULL) {
		/* endpoint found but no association; an inp ref is held */
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		MODULE_GLOBAL(ipsec4stat).in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id, port);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
bad:
	/* error exit: release stcb lock, extra inp ref, and the mbuf chain */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
/*
 * Standard (non-UDP-encapsulated) SCTP input entry point: forwards to
 * sctp_input_with_port() with an encapsulation port of 0.
 *
 * Converted from the obsolescent K&R identifier-list definition to a
 * prototype-style definition (identifier lists are removed in C23).
 */
void
sctp_input(struct mbuf *i_pak, int off)
{
	sctp_input_with_port(i_pak, off, 0);
}
5922