sctp_input.c revision 170744
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 170744 2007-06-14 22:59:04Z rrs $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>



static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now stops not only all cookie timers but also any INIT
	 * timers, making sure that the timers are stopped in all
	 * collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}

/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	op_err = NULL;
	init = &cp->init;
	/* First are we accepting? */
	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init: Abort, so_qlimit:%d\n",
		    inp->sctp_socket->so_qlimit);
		/*
		 * FIX ME ?? What about the TCP model when we have a
		 * match/restart case?
		 */
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	/* validate parameters */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		return;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	init_limit = offset + ntohs(cp->ch.chunk_length);
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    init_limit)) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	/* send an INIT-ACK w/cookie */
	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id);
}

/*
 * process peer "INIT/INIT-ACK" chunk; returns value < 0 on error
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	if (TAILQ_FIRST(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;

			if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
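	/* Take the send lock: we may shrink the outbound stream count below. */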
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp;

		/* cut back on number of streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* This if is probably not needed but I am cautious */
		if (asoc->strmout) {
			/* First make sure no data chunks are trapped */
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				sp = TAILQ_FIRST(&outs->outqueue);
				while (sp) {
					TAILQ_REMOVE(&outs->outqueue, sp,
					    next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
					/* Free the chunk */
					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
					    sp, stcb);

					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
					sp = TAILQ_FIRST(&outs->outqueue);
				}
			}
		}
		/* cut back the count and abandon the upper streams */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
	if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->cumulative_tsn = asoc->asconf_seq_in;
	asoc->last_echo_tsn = asoc->asconf_seq_in;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			while (ctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
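	/* Size our inbound streams to the peer's outbound count (capped just below). */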
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or, for the INIT sender, they are unset (if pr-sctp is not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIEs, existing and new, call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}

/*
 * INIT-ACK message processing/consumption; returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;

	/* First verify that we have no illegal params */
	abort_flag = 0;
	op_err = NULL;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
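	/* Pick the HMAC algorithm to use for AUTH from the peer's list and ours. */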
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer. We do this first, before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT, which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send an op error. But in
		 * any case, if there is no cookie in the INIT-ACK, we can
		 * abandon the peer; it's broken.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}

static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_nets *r_net;
	struct timeval tv;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;

	memset(&store, 0, sizeof(store));
	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
		sin->sin_family = cp->heartbeat.hb_info.addr_family;
		sin->sin_len = cp->heartbeat.hb_info.addr_len;
		sin->sin_port = stcb->rport;
		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin->sin_addr));
	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
		sin6->sin6_port = stcb->rport;
		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin6->sin6_addr));
	} else {
		return;
	}
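	/* Find the sctp_nets entry for the address echoed back in the heartbeat. */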
	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If it's an HB and its random value is correct, we can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			r_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (r_net != stcb->asoc.primary_destination) {
				/*
				 * The first one on the list is NOT the
				 * primary. sctp_cmpaddr() is much more
				 * efficient if the primary is the first on
				 * the list, so make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
			}
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net);
	}
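	/*
	 * The heartbeat was answered: clear the error count and feed the
	 * echoed timestamp into the RTO calculation below.
	 */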
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
		/* now was it the primary? if so restore */
		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
		}
	}
	/* Now let's do an RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
}

static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 0);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		sctp_update_acked(stcb, cp, net, abort_flag);
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of the last
		 * record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	}
	/* Now are we there yet? */
	some_on_streamwheel = 0;
	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
		/* Check to see if some data is queued */
		struct sctp_stream_out *outs;

		TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
			if (!TAILQ_EMPTY(&outs->outqueue)) {
				some_on_streamwheel = 1;
				break;
			}
		}
	}
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;

		/* start SHUTDOWN timer */
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}

static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of the last
		 * record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	/* are the queues empty? */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !TAILQ_EMPTY(&asoc->out_wheel)) {
		sctp_report_all_outbound(stcb, 0);
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* Set the connected flag to disconnected */
			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possibilities, ASCONF or FWD-TSN; other than that,
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem.  There are a number of params in an ASCONF, or the prsctp param;
 * these will turn off specific features.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_ECN_NONCE_SUPPORTED:
		stcb->asoc.peer_supports_ecn_nonce = 0;
		stcb->asoc.ecn_nonce_allowed = 0;
		stcb->asoc.ecn_allowed = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.peer_supports_asconf = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error_type;
	uint16_t error_len;
	struct sctp_association *asoc;

	int adjust;

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
				int *p;

				p = (int *)((caddr_t)phdr + sizeof(*phdr));
				/* Save the time doubled */
				asoc->cookie_preserve_req = ntohl(*p) << 1;
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0);
					/* now free the asoc */
					sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
					return (-1);
				}
				/* blast back to INIT state */
				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
				asoc->state |= SCTP_STATE_COOKIE_WAIT;

				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't receive an IPv6 packet. So we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and their timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in an
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell, do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have come WITH an abort instead of in an
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
			    error_type);
			break;
		}
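		/*
		 * Error causes are padded to a 32-bit boundary; step to the
		 * next one.
		 */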
		adjust = SCTP_SIZE32(error_len);
		chklen -= adjust;
		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
	}
	return (0);
}

static int
sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_init_ack *init_ack;
	int *state;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_init_ack: handling INIT-ACK\n");

	if (stcb == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init_ack: TCB is null\n");
		return (-1);
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	init_ack = &cp->init;
	/* validate parameters */
	if (init_ack->initiate_tag == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	state = &stcb->asoc.state;
	switch (*state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT; we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock, vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
			*state = SCTP_STATE_COOKIE_ECHOED |
			    SCTP_STATE_SHUTDOWN_PENDING;
		} else {
			*state = SCTP_STATE_COOKIE_ECHOED;
		}

		/* reset the RTO calc */
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of an exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}


/*
 * handle a state cookie for an existing association
 * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
 * note: this is a "split" mbuf and the cookie signature does not exist
 * offset: offset into mbuf to the cookie-echo chunk
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		struct mbuf *op_err;
		struct sctp_paramhdr *ph;

		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* pre-reserve some space */
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info); the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull an INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info); the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA; process
		 * accordingly to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
#ifdef INVARIANTS
			panic("Case D and non-match seq?");
#else
			SCTP_PRINTF("Case D, seq non-match %x vs %x?\n",
			    ntohl(initack_cp->init.initial_tsn),
			    asoc->init_seq_number);
#endif
		}
		switch (SCTP_GET_STATE(asoc)) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it... but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);

			} else {
				/* if ok, move to OPEN state */
				asoc->state = SCTP_STATE_OPEN;
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got an
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
				soisconnected(stcb->sctp_ep->sctp_socket);
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send an HB, make sure we don't
			 * double things
			 */
			net->hb_responded = 1;
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &cookie->time_entered);

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
		 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
	    init_cp->init.initiate_tag == 0)) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA; my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normally
			 * would have hit case C. This in effect "extends"
			 * our vtags in this collision case to be 64 bits.
			 * The same collision could occur aka you get both
			 * vtag and seq number the same twice in a row.. but
			 * it is much less likely. If it did happen then we
			 * would proceed through and bring up the assoc.. we
			 * may end up with the wrong stream setup however..
			 * which would be bad.. but there is no way to
			 * tell.. until we send on a stream that does not
			 * exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send an HB, make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok, the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit; we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
			 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
				soisconnected(stcb->sctp_ep->sctp_socket);
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else {
			asoc->state = SCTP_STATE_OPEN;
		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then, when we return, the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			asoc->state = SCTP_STATE_OPEN |
			    SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			asoc->state = SCTP_STATE_OPEN;
		}
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
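		/*
		 * Drop the TCB lock, then take the global INFO write lock,
		 * the INP lock and the TCB lock again before re-hashing the
		 * association under its new vtag.
		 */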
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    sctppcbinfo.hashasocmark)];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* Is this the first restart? */
		if (stcb->asoc.in_restart_hash == 0) {
			/* Ok add it to assoc_id vtag hash */
			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
			    sctppcbinfo.hashrestartmark)];
			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
			stcb->asoc.in_restart_hash = 1;
		}
		/* process the INIT info (peer's info) */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send an HB, make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}
1456
1457
1458/*
1459 * handle a state cookie for a new association m: input packet mbuf chain--
1460 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1461 * and the cookie signature does not exist offset: offset into mbuf to the
1462 * cookie-echo chunk length: length of the cookie chunk to: where the init
1463 * was from returns a new TCB
1464 */
1465static struct sctp_tcb *
1466sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1467    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1468    struct sctp_inpcb *inp, struct sctp_nets **netp,
1469    struct sockaddr *init_src, int *notification,
1470    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1471    uint32_t vrf_id)
1472{
1473	struct sctp_tcb *stcb;
1474	struct sctp_init_chunk *init_cp, init_buf;
1475	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1476	struct sockaddr_storage sa_store;
1477	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1478	struct sockaddr_in *sin;
1479	struct sockaddr_in6 *sin6;
1480	struct sctp_association *asoc;
1481	int chk_length;
1482	int init_offset, initack_offset, initack_limit;
1483	int retval;
1484	int error = 0;
1485	uint32_t old_tag;
1486	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1487
1488	/*
1489	 * find and validate the INIT chunk in the cookie (peer's info) the
1490	 * INIT should start after the cookie-echo header struct (chunk
1491	 * header, state cookie header struct)
1492	 */
1493	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1494	init_cp = (struct sctp_init_chunk *)
1495	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1496	    (uint8_t *) & init_buf);
1497	if (init_cp == NULL) {
1498		/* could not pull a INIT chunk in cookie */
1499		SCTPDBG(SCTP_DEBUG_INPUT1,
1500		    "process_cookie_new: could not pull INIT chunk hdr\n");
1501		return (NULL);
1502	}
1503	chk_length = ntohs(init_cp->ch.chunk_length);
1504	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1505		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1506		return (NULL);
1507	}
1508	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1509	/*
1510	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1511	 * INIT-ACK follows the INIT chunk
1512	 */
1513	initack_cp = (struct sctp_init_ack_chunk *)
1514	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1515	    (uint8_t *) & initack_buf);
1516	if (initack_cp == NULL) {
1517		/* could not pull INIT-ACK chunk in cookie */
1518		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1519		return (NULL);
1520	}
1521	chk_length = ntohs(initack_cp->ch.chunk_length);
1522	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1523		return (NULL);
1524	}
1525	/*
1526	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1527	 * "initack_limit" value.  This is because the chk_length field
1528	 * includes the length of the cookie, but the cookie is omitted when
1529	 * the INIT and INIT_ACK are tacked onto the cookie...
1530	 */
1531	initack_limit = offset + cookie_len;
1532
1533	/*
1534	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
1535	 * and populate it
1536	 */
1537	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1538	    ntohl(initack_cp->init.initiate_tag), vrf_id);
1539	if (stcb == NULL) {
1540		struct mbuf *op_err;
1541
1542		/* memory problem? */
1543		SCTPDBG(SCTP_DEBUG_INPUT1,
1544		    "process_cookie_new: no room for another TCB!\n");
1545		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1546
1547		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1548		    sh, op_err, vrf_id);
1549		return (NULL);
1550	}
1551	/* get the correct sctp_nets */
1552	if (netp)
1553		*netp = sctp_findnet(stcb, init_src);
1554
1555	asoc = &stcb->asoc;
1556	/* get scope variables out of cookie */
1557	asoc->ipv4_local_scope = cookie->ipv4_scope;
1558	asoc->site_scope = cookie->site_scope;
1559	asoc->local_scope = cookie->local_scope;
1560	asoc->loopback_scope = cookie->loopback_scope;
1561
1562	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1563	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1564		struct mbuf *op_err;
1565
1566		/*
1567		 * Houston we have a problem. The EP changed while the
1568		 * cookie was in flight. Only recourse is to abort the
1569		 * association.
1570		 */
1571		atomic_add_int(&stcb->asoc.refcnt, 1);
1572		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1573		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1574		    sh, op_err, vrf_id);
1575		atomic_add_int(&stcb->asoc.refcnt, -1);
1576		return (NULL);
1577	}
1578	/* process the INIT-ACK info (my info) */
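	/*
	 * No TCB existed when this cookie arrived; the embedded INIT-ACK is
	 * the one we generated earlier, so adopting its initiate_tag and
	 * initial TSN below rebuilds the state we intentionally did not keep
	 * around while waiting for the COOKIE-ECHO.
	 */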
1579	old_tag = asoc->my_vtag;
1580	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1581	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1582	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1583	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1584	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1585	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1586	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1587	asoc->str_reset_seq_in = asoc->init_seq_number;
1588
1589	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1590
1591	/* process the INIT info (peer's info) */
1592	if (netp)
1593		retval = sctp_process_init(init_cp, stcb, *netp);
1594	else
1595		retval = 0;
1596	if (retval < 0) {
1597		atomic_add_int(&stcb->asoc.refcnt, 1);
1598		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1599		atomic_add_int(&stcb->asoc.refcnt, -1);
1600		return (NULL);
1601	}
1602	/* load all addresses */
1603	if (sctp_load_addresses_from_init(stcb, m, iphlen,
1604	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
1605	    init_src)) {
1606		atomic_add_int(&stcb->asoc.refcnt, 1);
1607		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1608		atomic_add_int(&stcb->asoc.refcnt, -1);
1609		return (NULL);
1610	}
1611	/*
1612	 * verify any preceding AUTH chunk that was skipped
1613	 */
1614	/* pull the local authentication parameters from the cookie/init-ack */
1615	sctp_auth_get_cookie_params(stcb, m,
1616	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1617	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
1618	if (auth_skipped) {
1619		struct sctp_auth_chunk *auth;
1620
1621		auth = (struct sctp_auth_chunk *)
1622		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1623		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1624			/* auth HMAC failed, dump the assoc and packet */
1625			SCTPDBG(SCTP_DEBUG_AUTH1,
1626			    "COOKIE-ECHO: AUTH failed\n");
1627			sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1628			return (NULL);
1629		} else {
1630			/* remaining chunks checked... good to go */
1631			stcb->asoc.authenticated = 1;
1632		}
1633	}
1634	/* update current state */
1635	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1636	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1637		asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1638		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1639		    stcb->sctp_ep, stcb, asoc->primary_destination);
1640	} else {
1641		asoc->state = SCTP_STATE_OPEN;
1642	}
1643	sctp_stop_all_cookie_timers(stcb);
1644	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1645	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1646
1647	/*
1648	 * if we're doing ASCONFs, check to see if we have any new local
1649	 * addresses that need to get added to the peer (e.g. addresses
1650	 * changed while the cookie echo was in flight).  This needs to be
1651	 * done after we go to the OPEN state to do the correct asconf
1652	 * processing. Otherwise, make sure we have the correct addresses in
1653	 * our lists.
1654	 */
1655
1656	/* warning, we re-use sin, sin6, sa_store here! */
1657	/* pull in local_address (our "from" address) */
1658	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1659		/* source addr is IPv4 */
1660		sin = (struct sockaddr_in *)initack_src;
1661		memset(sin, 0, sizeof(*sin));
1662		sin->sin_family = AF_INET;
1663		sin->sin_len = sizeof(struct sockaddr_in);
1664		sin->sin_addr.s_addr = cookie->laddress[0];
1665	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1666		/* source addr is IPv6 */
1667		sin6 = (struct sockaddr_in6 *)initack_src;
1668		memset(sin6, 0, sizeof(*sin6));
1669		sin6->sin6_family = AF_INET6;
1670		sin6->sin6_len = sizeof(struct sockaddr_in6);
1671		sin6->sin6_scope_id = cookie->scope_id;
1672		memcpy(&sin6->sin6_addr, cookie->laddress,
1673		    sizeof(sin6->sin6_addr));
1674	} else {
1675		atomic_add_int(&stcb->asoc.refcnt, 1);
1676		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
1677		atomic_add_int(&stcb->asoc.refcnt, -1);
1678		return (NULL);
1679	}
1680
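	/*
	 * Reconcile the local addresses we advertised in the INIT-ACK (as
	 * recorded in the cookie) with the endpoint's current address list,
	 * honoring the scope flags the cookie carried.
	 */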
1681	sctp_check_address_list(stcb, m,
1682	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1683	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
1684	    initack_src, cookie->local_scope, cookie->site_scope,
1685	    cookie->ipv4_scope, cookie->loopback_scope);
1686
1687
1688	/* set up to notify upper layer */
1689	*notification = SCTP_NOTIFY_ASSOC_UP;
1690	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1691	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1692	    (inp->sctp_socket->so_qlimit == 0)) {
1693		/*
1694		 * This is an endpoint that called connect(); how it got a
1695		 * cookie that is NEW is a bit of a mystery. It must be that
1696		 * the INIT was sent, but before it got there a complete
1697		 * INIT/INIT-ACK/COOKIE arrived. Of course it should then have
1698		 * gone to the other code path, not here, but a bit of
1699		 * protection is worth having.
1700		 */
1701		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1702		soisconnected(stcb->sctp_ep->sctp_socket);
1703	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1704	    (inp->sctp_socket->so_qlimit)) {
1705		/*
1706		 * We don't want to do anything with this one, since it is
1707		 * the listening socket. The timer will get started for
1708		 * accepted connections in the caller.
1709		 */
1710		;
1711	}
1712	/* since we did not send a HB make sure we don't double things */
1713	if ((netp) && (*netp))
1714		(*netp)->hb_responded = 1;
1715
1716	if (stcb->asoc.sctp_autoclose_ticks &&
1717	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1718		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1719	}
1720	/* respond with a COOKIE-ACK */
1721	/* calculate the RTT */
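	/*
	 * The cookie carries the time it was created (when the INIT-ACK was
	 * built), so the COOKIE-ECHO round trip gives us the first RTO
	 * sample for this destination.
	 */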
1722	if ((netp) && (*netp)) {
1723		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
1724		    &cookie->time_entered);
1725	}
1726	sctp_send_cookie_ack(stcb);
1727	return (stcb);
1728}
1729
1730
1731/*
1732 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
1733 * existing (non-NULL) TCB
1734 */
1735static struct mbuf *
1736sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
1737    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
1738    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
1739    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1740    struct sctp_tcb **locked_tcb, uint32_t vrf_id)
1741{
1742	struct sctp_state_cookie *cookie;
1743	struct sockaddr_in6 sin6;
1744	struct sockaddr_in sin;
1745	struct sctp_tcb *l_stcb = *stcb;
1746	struct sctp_inpcb *l_inp;
1747	struct sockaddr *to;
1748	sctp_assoc_t sac_restart_id;
1749	struct sctp_pcb *ep;
1750	struct mbuf *m_sig;
1751	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
1752	uint8_t *sig;
1753	uint8_t cookie_ok = 0;
1754	unsigned int size_of_pkt, sig_offset, cookie_offset;
1755	unsigned int cookie_len;
1756	struct timeval now;
1757	struct timeval time_expires;
1758	struct sockaddr_storage dest_store;
1759	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
1760	struct ip *iph;
1761	int notification = 0;
1762	struct sctp_nets *netl;
1763	int had_a_existing_tcb = 0;
1764
1765	SCTPDBG(SCTP_DEBUG_INPUT2,
1766	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
1767
1768	if (inp_p == NULL) {
1769		return (NULL);
1770	}
1771	/* First get the destination address setup too. */
1772	iph = mtod(m, struct ip *);
1773	if (iph->ip_v == IPVERSION) {
1774		/* it's IPv4 */
1775		struct sockaddr_in *lsin;
1776
1777		lsin = (struct sockaddr_in *)(localep_sa);
1778		memset(lsin, 0, sizeof(*lsin));
1779		lsin->sin_family = AF_INET;
1780		lsin->sin_len = sizeof(*lsin);
1781		lsin->sin_port = sh->dest_port;
1782		lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
1783		size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
1784	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1785		/* it's IPv6 */
1786		struct ip6_hdr *ip6;
1787		struct sockaddr_in6 *lsin6;
1788
1789		lsin6 = (struct sockaddr_in6 *)(localep_sa);
1790		memset(lsin6, 0, sizeof(*lsin6));
1791		lsin6->sin6_family = AF_INET6;
1792		lsin6->sin6_len = sizeof(struct sockaddr_in6);
1793		ip6 = mtod(m, struct ip6_hdr *);
1794		lsin6->sin6_port = sh->dest_port;
1795		lsin6->sin6_addr = ip6->ip6_dst;
1796		size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
1797	} else {
1798		return (NULL);
1799	}
1800
1801	cookie = &cp->cookie;
1802	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
1803	cookie_len = ntohs(cp->ch.chunk_length);
1804
1805	if ((cookie->peerport != sh->src_port) ||
1806	    (cookie->myport != sh->dest_port) ||
1807	    (cookie->my_vtag != sh->v_tag)) {
1808		/*
1809		 * invalid ports or bad tag.  Note that we always leave the
1810		 * v_tag in the header in network order and when we stored
1811		 * it in the my_vtag slot we also left it in network order.
1812		 * This maintains the match even though it may be in the
1813		 * opposite byte order of the machine :->
1814		 */
1815		return (NULL);
1816	}
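	/*
	 * Sanity-check the cookie size: it cannot extend past the packet and
	 * must be large enough to hold the echo header, the embedded INIT and
	 * INIT-ACK, and the trailing HMAC signature.
	 */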
1817	if (cookie_len > size_of_pkt ||
1818	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
1819	    sizeof(struct sctp_init_chunk) +
1820	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
1821		/* cookie too long or too small! */
1822		return (NULL);
1823	}
1824	/*
1825	 * split off the signature into its own mbuf (since it should not be
1826	 * calculated in the sctp_hmac_m() call).
1827	 */
1828	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
1829	if (sig_offset > size_of_pkt) {
1830		/* packet not correct size! */
1831		/* XXX this may already be accounted for earlier... */
1832		return (NULL);
1833	}
1834	m_sig = m_split(m, sig_offset, M_DONTWAIT);
1835	if (m_sig == NULL) {
1836		/* out of memory or ?? */
1837		return (NULL);
1838	}
1839	/*
1840	 * compute the signature/digest for the cookie
1841	 */
1842	ep = &(*inp_p)->sctp_ep;
1843	l_inp = *inp_p;
1844	if (l_stcb) {
1845		SCTP_TCB_UNLOCK(l_stcb);
1846	}
1847	SCTP_INP_RLOCK(l_inp);
1848	if (l_stcb) {
1849		SCTP_TCB_LOCK(l_stcb);
1850	}
1851	/* which cookie is it? */
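	/*
	 * The endpoint rotates its cookie secret over time; a cookie stamped
	 * before the last rotation is verified with the previous secret so it
	 * remains valid across the change.
	 */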
1852	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
1853	    (ep->current_secret_number != ep->last_secret_number)) {
1854		/* it's the old cookie */
1855		(void)sctp_hmac_m(SCTP_HMAC,
1856		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1857		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1858	} else {
1859		/* it's the current cookie */
1860		(void)sctp_hmac_m(SCTP_HMAC,
1861		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
1862		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1863	}
1864	/* get the signature */
1865	SCTP_INP_RUNLOCK(l_inp);
1866	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
1867	if (sig == NULL) {
1868		/* couldn't find signature */
1869		sctp_m_freem(m_sig);
1870		return (NULL);
1871	}
1872	/* compare the received digest with the computed digest */
1873	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
1874		/* try the old cookie? */
1875		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
1876		    (ep->current_secret_number != ep->last_secret_number)) {
1877			/* compute digest with old */
1878			(void)sctp_hmac_m(SCTP_HMAC,
1879			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1880			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1881			/* compare */
1882			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
1883				cookie_ok = 1;
1884		}
1885	} else {
1886		cookie_ok = 1;
1887	}
1888
1889	/*
1890	 * Now before we continue we must reconstruct our mbuf so that
1891	 * normal processing of any other chunks will work.
1892	 */
1893	{
1894		struct mbuf *m_at;
1895
1896		m_at = m;
1897		while (SCTP_BUF_NEXT(m_at) != NULL) {
1898			m_at = SCTP_BUF_NEXT(m_at);
1899		}
1900		SCTP_BUF_NEXT(m_at) = m_sig;
1901	}
1902
1903	if (cookie_ok == 0) {
1904		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
1905		SCTPDBG(SCTP_DEBUG_INPUT2,
1906		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
1907		    (uint32_t) offset, cookie_offset, sig_offset);
1908		return (NULL);
1909	}
1910	/*
1911	 * check the cookie timestamps to be sure it's not stale
1912	 */
1913	(void)SCTP_GETTIME_TIMEVAL(&now);
1914	/* Expire time is in Ticks, so we convert to seconds */
1915	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
1916	time_expires.tv_usec = cookie->time_entered.tv_usec;
1917	if (timevalcmp(&now, &time_expires, >)) {
1918		/* cookie is stale! */
1919		struct mbuf *op_err;
1920		struct sctp_stale_cookie_msg *scm;
1921		uint32_t tim;
1922
1923		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
1924		    0, M_DONTWAIT, 1, MT_DATA);
1925		if (op_err == NULL) {
1926			/* FOOBAR */
1927			return (NULL);
1928		}
1929		/* pre-reserve some space */
1930		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1931		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1932		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1933
1934		/* Set the len */
1935		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
1936		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
1937		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
1938		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
1939		    (sizeof(uint32_t))));
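		/*
		 * The Stale Cookie error reports a measure of staleness: how
		 * many microseconds past its lifetime the cookie arrived.
		 */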
1940		/* seconds to usec */
1941		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
1942		/* add in usec */
1943		if (tim == 0)
1944			tim = now.tv_usec - cookie->time_entered.tv_usec;
1945		scm->time_usec = htonl(tim);
1946		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1947		    vrf_id);
1948		return (NULL);
1949	}
1950	/*
1951	 * Now we must see, using the lookup address, if we have an existing
1952	 * asoc. This will only happen if we were in the COOKIE-WAIT state
1953	 * and an INIT collided with us and somewhere the peer sent the
1954	 * cookie on another address besides the single address our assoc
1955	 * had for it. In this case we will have one of the tie-tags set at
1956	 * least AND the address field in the cookie can be used to look it
1957	 * up.
1958	 */
1959	to = NULL;
1960	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
1961		memset(&sin6, 0, sizeof(sin6));
1962		sin6.sin6_family = AF_INET6;
1963		sin6.sin6_len = sizeof(sin6);
1964		sin6.sin6_port = sh->src_port;
1965		sin6.sin6_scope_id = cookie->scope_id;
1966		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
1967		    sizeof(sin6.sin6_addr.s6_addr));
1968		to = (struct sockaddr *)&sin6;
1969	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
1970		memset(&sin, 0, sizeof(sin));
1971		sin.sin_family = AF_INET;
1972		sin.sin_len = sizeof(sin);
1973		sin.sin_port = sh->src_port;
1974		sin.sin_addr.s_addr = cookie->address[0];
1975		to = (struct sockaddr *)&sin;
1976	} else {
1977		/* This should not happen */
1978		return (NULL);
1979	}
1980	if ((*stcb == NULL) && to) {
1981		/* Yep, lets check */
1982		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
1983		if (*stcb == NULL) {
1984			/*
1985			 * We should only have got back the same inp. If we
1986			 * got back a different ep we have a problem: the
1987			 * original findep got back l_inp and now we have another.
1988			 */
1989			if (l_inp != *inp_p) {
1990				SCTP_PRINTF("Bad problem find_ep got a diff inp than special_locate?\n");
1991			}
1992		} else {
1993			if (*locked_tcb == NULL) {
1994				/*
1995				 * In this case we found the assoc only
1996				 * after we locked the create lock. This
1997				 * means we are in a colliding case and we
1998				 * must make sure that we unlock the tcb if
1999				 * its one of the cases where we throw away
2000				 * the incoming packets.
2001				 */
2002				*locked_tcb = *stcb;
2003
2004				/*
2005				 * We must also increment the inp ref count
2006				 * since the ref_count flag was set when we
2007				 * did not find the TCB; now we found it,
2008				 * which reduces the refcount, so we must
2009				 * raise it back up to balance it all :-)
2010				 */
2011				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2012				if ((*stcb)->sctp_ep != l_inp) {
2013					SCTP_PRINTF("Huh? ep:%p diff than l_inp:%p?\n",
2014					    (*stcb)->sctp_ep, l_inp);
2015				}
2016			}
2017		}
2018	}
2019	if (to == NULL)
2020		return (NULL);
2021
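	/*
	 * The trailing HMAC signature has already been checked above, so
	 * shrink cookie_len to cover only the cookie data handed to the
	 * processing routines.
	 */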
2022	cookie_len -= SCTP_SIGNATURE_SIZE;
2023	if (*stcb == NULL) {
2024		/* this is the "normal" case... get a new TCB */
2025		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2026		    cookie_len, *inp_p, netp, to, &notification,
2027		    auth_skipped, auth_offset, auth_len, vrf_id);
2028	} else {
2029		/* this is abnormal... cookie-echo on existing TCB */
2030		had_a_existing_tcb = 1;
2031		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2032		    cookie, cookie_len, *inp_p, *stcb, *netp, to,
2033		    &notification, &sac_restart_id, vrf_id);
2034	}
2035
2036	if (*stcb == NULL) {
2037		/* still no TCB... must be bad cookie-echo */
2038		return (NULL);
2039	}
2040	/*
2041	 * Ok, we built an association so confirm the address we sent the
2042	 * INIT-ACK to.
2043	 */
2044	netl = sctp_findnet(*stcb, to);
2045	/*
2046	 * This code should in theory NOT run, but just in case it does:
2047	 */
2048	if (netl == NULL) {
2049		/* TSNH! Huh, why do I need to add this address here? */
2050		int ret;
2051
2052		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2053		    SCTP_IN_COOKIE_PROC);
2054		netl = sctp_findnet(*stcb, to);
2055	}
2056	if (netl) {
2057		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2058			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2059			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2060			    netl);
2061			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2062			    (*stcb), 0, (void *)netl);
2063		}
2064	}
2065	if (*stcb) {
2066		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2067		    *stcb, NULL);
2068	}
2069	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2070		if (!had_a_existing_tcb ||
2071		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2072			/*
2073			 * If we have a NEW cookie or the connect never
2074			 * reached the connected state during collision we
2075			 * must do the TCP accept thing.
2076			 */
2077			struct socket *so, *oso;
2078			struct sctp_inpcb *inp;
2079
2080			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2081				/*
2082				 * For a restart we will keep the same
2083				 * socket, no need to do anything. I THINK!!
2084				 */
2085				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id);
2086				return (m);
2087			}
2088			oso = (*inp_p)->sctp_socket;
2089			/*
2090			 * We do this to keep the sockets side happy during
2091			 * the sonewconn() ONLY.
2092			 */
2093			NET_LOCK_GIANT();
2094			SCTP_TCB_UNLOCK((*stcb));
2095			so = sonewconn(oso, 0
2096			    );
2097			NET_UNLOCK_GIANT();
2098			SCTP_INP_WLOCK((*stcb)->sctp_ep);
2099			SCTP_TCB_LOCK((*stcb));
2100			SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
2101			if (so == NULL) {
2102				struct mbuf *op_err;
2103
2104				/* Too many sockets */
2105				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2106				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2107				sctp_abort_association(*inp_p, NULL, m, iphlen,
2108				    sh, op_err, vrf_id);
2109				sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2110				return (NULL);
2111			}
2112			inp = (struct sctp_inpcb *)so->so_pcb;
2113			SCTP_INP_INCR_REF(inp);
2114			/*
2115			 * We add the unbound flag here so that if we get an
2116			 * soabort() before we get the move_pcb done, we
2117			 * will clean up properly.
2118			 */
2119			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2120			    SCTP_PCB_FLAGS_CONNECTED |
2121			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2122			    SCTP_PCB_FLAGS_UNBOUND |
2123			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2124			    SCTP_PCB_FLAGS_DONT_WAKE);
2125			inp->sctp_features = (*inp_p)->sctp_features;
2126			inp->sctp_socket = so;
2127			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2128			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2129			inp->sctp_context = (*inp_p)->sctp_context;
2130			inp->inp_starting_point_for_iterator = NULL;
2131			/*
2132			 * copy in the authentication parameters from the
2133			 * original endpoint
2134			 */
2135			if (inp->sctp_ep.local_hmacs)
2136				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2137			inp->sctp_ep.local_hmacs =
2138			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2139			if (inp->sctp_ep.local_auth_chunks)
2140				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2141			inp->sctp_ep.local_auth_chunks =
2142			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2143			(void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2144			    &inp->sctp_ep.shared_keys);
2145
2146			/*
2147			 * Now we must move it from one hash table to
2148			 * another and get the tcb in the right place.
2149			 */
2150			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2151
2152			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2153			SCTP_TCB_UNLOCK((*stcb));
2154
2155			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2156			SCTP_TCB_LOCK((*stcb));
2157			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2158
2159
2160			/*
2161			 * now we must check to see if we were aborted while
2162			 * the move was going on and the lock/unlock
2163			 * happened.
2164			 */
2165			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2166				/*
2167				 * yep it was, we leave the assoc attached
2168				 * to the socket since the sctp_inpcb_free()
2169				 * call will send an abort for us.
2170				 */
2171				SCTP_INP_DECR_REF(inp);
2172				return (NULL);
2173			}
2174			SCTP_INP_DECR_REF(inp);
2175			/* Switch over to the new guy */
2176			*inp_p = inp;
2177			sctp_ulp_notify(notification, *stcb, 0, NULL);
2178
2179			/*
2180			 * Pull it from the incomplete queue and wake the
2181			 * guy
2182			 */
2183			soisconnected(so);
2184			return (m);
2185		}
2186	}
2187	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2188		sctp_ulp_notify(notification, *stcb, 0, NULL);
2189	}
2190	return (m);
2191}
2192
2193static void
2194sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2195    struct sctp_tcb *stcb, struct sctp_nets *net)
2196{
2197	/* cp must not be used, others call this without a c-ack :-) */
2198	struct sctp_association *asoc;
2199
2200	SCTPDBG(SCTP_DEBUG_INPUT2,
2201	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2202	if (stcb == NULL)
2203		return;
2204
2205	asoc = &stcb->asoc;
2206
2207	sctp_stop_all_cookie_timers(stcb);
2208	/* process according to association state */
2209	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2210		/* state change only needed when I am in right state */
2211		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2212		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2213			asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
2214			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2215			    stcb->sctp_ep, stcb, asoc->primary_destination);
2216
2217		} else {
2218			asoc->state = SCTP_STATE_OPEN;
2219		}
2220		/* update RTO */
2221		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2222		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
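		/*
		 * Only take an RTT sample from the setup exchange when
		 * nothing has been retransmitted (the error count is zero),
		 * presumably to honor Karn's rule.
		 */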
2223		if (asoc->overall_error_count == 0) {
2224			net->RTO = sctp_calculate_rto(stcb, asoc, net,
2225			    &asoc->time_entered);
2226		}
2227		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2228		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
2229		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2230		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2231			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2232			soisconnected(stcb->sctp_ep->sctp_socket);
2233		}
2234		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2235		    stcb, net);
2236		/*
2237		 * since we did not send a HB make sure we don't double
2238		 * things
2239		 */
2240		net->hb_responded = 1;
2241
2242		if (stcb->asoc.sctp_autoclose_ticks &&
2243		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2244			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2245			    stcb->sctp_ep, stcb, NULL);
2246		}
2247		/*
2248		 * set ASCONF timer if ASCONFs are pending and allowed (eg.
2249		 * addresses changed when init/cookie echo in flight)
2250		 */
2251		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2252		    (stcb->asoc.peer_supports_asconf) &&
2253		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2254			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2255			    stcb->sctp_ep, stcb,
2256			    stcb->asoc.primary_destination);
2257		}
2258	}
2259	/* Toss the cookie if I can */
2260	sctp_toss_old_cookies(stcb, asoc);
2261	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2262		/* Restart the timer if we have pending data */
2263		struct sctp_tmit_chunk *chk;
2264
2265		chk = TAILQ_FIRST(&asoc->sent_queue);
2266		if (chk) {
2267			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2268			    stcb, chk->whoTo);
2269		}
2270	}
2271}
2272
2273static void
2274sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2275    struct sctp_tcb *stcb)
2276{
2277	struct sctp_nets *net;
2278	struct sctp_tmit_chunk *lchk;
2279	uint32_t tsn;
2280
2281	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2282		return;
2283	}
2284	SCTP_STAT_INCR(sctps_recvecne);
2285	tsn = ntohl(cp->tsn);
2286	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2287	/* Also we make sure we disable the nonce_wait */
2288	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2289	if (lchk == NULL) {
2290		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2291	} else {
2292		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2293	}
2294	stcb->asoc.nonce_wait_for_ecne = 0;
2295	stcb->asoc.nonce_sum_check = 0;
2296
2297	/* Find where it was sent, if possible */
2298	net = NULL;
2299	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2300	while (lchk) {
2301		if (lchk->rec.data.TSN_seq == tsn) {
2302			net = lchk->whoTo;
2303			break;
2304		}
2305		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2306			break;
2307		lchk = TAILQ_NEXT(lchk, sctp_next);
2308	}
2309	if (net == NULL)
2310		/* default is we use the primary */
2311		net = stcb->asoc.primary_destination;
2312
2313	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2314		int old_cwnd;
2315
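		/*
		 * React to the ECN-Echo as if congestion were detected:
		 * halve the window via ssthresh, without retransmitting
		 * anything, and at most once per window (the last_cwr_tsn
		 * check above).
		 */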
2316		old_cwnd = net->cwnd;
2317		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
2318		net->ssthresh = net->cwnd / 2;
2319		if (net->ssthresh < net->mtu) {
2320			net->ssthresh = net->mtu;
2321			/* here back off the timer as well, to slow us down */
2322			net->RTO <<= 1;
2323		}
2324		net->cwnd = net->ssthresh;
2325		if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
2326			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
2327		}
2328		/*
2329		 * we reduce once every RTT. So we will only lower cwnd at
2330		 * the next sending seq i.e. the resync_tsn.
2331		 */
2332		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2333	}
2334	/*
2335	 * We always send a CWR this way; if our previous one was lost our
2336	 * peer will get an update, and if it is not yet time to reduce again
2337	 * the peer still gets the CWR.
2338	 */
2339	sctp_send_cwr(stcb, net, tsn);
2340}
2341
2342static void
2343sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2344{
2345	/*
2346	 * Here we get a CWR from the peer. We must look in the outqueue and
2347	 * make sure that we have a covered ECNE in the control chunk part.
2348	 * If so, remove it.
2349	 */
2350	struct sctp_tmit_chunk *chk;
2351	struct sctp_ecne_chunk *ecne;
2352
2353	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2354		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2355			continue;
2356		}
2357		/*
2358		 * Look for and remove if it is the right TSN. Since there
2359		 * is only ONE ECNE on the control queue at any one time we
2360		 * don't need to worry about more than one!
2361		 */
2362		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2363		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2364		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2365			/* this covers this ECNE, we can remove it */
2366			stcb->asoc.ecn_echo_cnt_onq--;
2367			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2368			    sctp_next);
2369			if (chk->data) {
2370				sctp_m_freem(chk->data);
2371				chk->data = NULL;
2372			}
2373			stcb->asoc.ctrl_queue_cnt--;
2374			sctp_free_remote_addr(chk->whoTo);
2375			sctp_free_a_chunk(stcb, chk);
2376			break;
2377		}
2378	}
2379}
2380
2381static void
2382sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2383    struct sctp_tcb *stcb, struct sctp_nets *net)
2384{
2385	struct sctp_association *asoc;
2386
2387	SCTPDBG(SCTP_DEBUG_INPUT2,
2388	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2389	if (stcb == NULL)
2390		return;
2391
2392	asoc = &stcb->asoc;
2393	/* process according to association state */
2394	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2395		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
2396		SCTP_TCB_UNLOCK(stcb);
2397		return;
2398	}
2399	/* notify upper layer protocol */
2400	if (stcb->sctp_socket) {
2401		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
2402		/* are the queues empty? they should be */
2403		if (!TAILQ_EMPTY(&asoc->send_queue) ||
2404		    !TAILQ_EMPTY(&asoc->sent_queue) ||
2405		    !TAILQ_EMPTY(&asoc->out_wheel)) {
2406			sctp_report_all_outbound(stcb, 0);
2407		}
2408	}
2409	/* stop the timer */
2410	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2411	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
2412	/* free the TCB */
2413	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2414	return;
2415}
2416
2417static int
2418process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2419    struct sctp_nets *net, uint8_t flg)
2420{
2421	switch (desc->chunk_type) {
2422		case SCTP_DATA:
2423		/* find the tsn to resend (possibly) */
2424		{
2425			uint32_t tsn;
2426			struct sctp_tmit_chunk *tp1;
2427
2428			tsn = ntohl(desc->tsn_ifany);
2429			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2430			while (tp1) {
2431				if (tp1->rec.data.TSN_seq == tsn) {
2432					/* found it */
2433					break;
2434				}
2435				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2436				    MAX_TSN)) {
2437					/* not found */
2438					tp1 = NULL;
2439					break;
2440				}
2441				tp1 = TAILQ_NEXT(tp1, sctp_next);
2442			}
2443			if (tp1 == NULL) {
2444				/*
2445				 * Do it the other way, i.e. without paying
2446				 * attention to queue seq order.
2447				 */
2448				SCTP_STAT_INCR(sctps_pdrpdnfnd);
2449				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2450				while (tp1) {
2451					if (tp1->rec.data.TSN_seq == tsn) {
2452						/* found it */
2453						break;
2454					}
2455					tp1 = TAILQ_NEXT(tp1, sctp_next);
2456				}
2457			}
2458			if (tp1 == NULL) {
2459				SCTP_STAT_INCR(sctps_pdrptsnnf);
2460			}
2461			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
2462				uint8_t *ddp;
2463
2464				if ((stcb->asoc.peers_rwnd == 0) &&
2465				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2466					SCTP_STAT_INCR(sctps_pdrpdiwnp);
2467					return (0);
2468				}
2469				if (stcb->asoc.peers_rwnd == 0 &&
2470				    (flg & SCTP_FROM_MIDDLE_BOX)) {
2471					SCTP_STAT_INCR(sctps_pdrpdizrw);
2472					return (0);
2473				}
2474				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2475				    sizeof(struct sctp_data_chunk));
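				/*
				 * Compare the first bytes of our copy of the
				 * data with the bytes echoed in the drop
				 * report; a mismatch means the report does
				 * not describe this chunk.
				 */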
2476				{
2477					unsigned int iii;
2478
2479					for (iii = 0; iii < sizeof(desc->data_bytes);
2480					    iii++) {
2481						if (ddp[iii] != desc->data_bytes[iii]) {
2482							SCTP_STAT_INCR(sctps_pdrpbadd);
2483							return (-1);
2484						}
2485					}
2486				}
2487				/*
2488				 * We zero out the nonce so a resync is not
2489				 * needed
2490				 */
2491				tp1->rec.data.ect_nonce = 0;
2492
2493				if (tp1->do_rtt) {
2494					/*
2495					 * this guy had an RTO calculation
2496					 * pending on it, cancel it
2497					 */
2498					tp1->do_rtt = 0;
2499				}
2500				SCTP_STAT_INCR(sctps_pdrpmark);
2501				if (tp1->sent != SCTP_DATAGRAM_RESEND)
2502					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2503				tp1->sent = SCTP_DATAGRAM_RESEND;
2504				/*
2505				 * mark it as if we were doing a FR, since
2506				 * we will be getting gap ack reports behind
2507				 * the info from the router.
2508				 */
2509				tp1->rec.data.doing_fast_retransmit = 1;
2510				/*
2511				 * mark the tsn with what sequences can
2512				 * cause a new FR.
2513				 */
2514				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2515					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2516				} else {
2517					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2518				}
2519
2520				/* restart the timer */
2521				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2522				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2523				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2524				    stcb, tp1->whoTo);
2525
2526				/* fix counts and things */
2527				if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
2528					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2529					    tp1->whoTo->flight_size,
2530					    tp1->book_size,
2531					    (uintptr_t) stcb,
2532					    tp1->rec.data.TSN_seq);
2533				}
2534				sctp_flight_size_decrease(tp1);
2535				sctp_total_flight_decrease(stcb, tp1);
2536			} {
2537				/* audit code */
2538				unsigned int audit;
2539
2540				audit = 0;
2541				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2542					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2543						audit++;
2544				}
2545				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2546				    sctp_next) {
2547					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2548						audit++;
2549				}
2550				if (audit != stcb->asoc.sent_queue_retran_cnt) {
2551					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
2552					    audit, stcb->asoc.sent_queue_retran_cnt);
2553#ifndef SCTP_AUDITING_ENABLED
2554					stcb->asoc.sent_queue_retran_cnt = audit;
2555#endif
2556				}
2557			}
2558		}
2559		break;
2560	case SCTP_ASCONF:
2561		{
2562			struct sctp_tmit_chunk *asconf;
2563
2564			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
2565			    sctp_next) {
2566				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
2567					break;
2568				}
2569			}
2570			if (asconf) {
2571				if (asconf->sent != SCTP_DATAGRAM_RESEND)
2572					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2573				asconf->sent = SCTP_DATAGRAM_RESEND;
2574				asconf->snd_count--;
2575			}
2576		}
2577		break;
2578	case SCTP_INITIATION:
2579		/* resend the INIT */
2580		stcb->asoc.dropped_special_cnt++;
2581		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
2582			/*
2583			 * If we can get it in within a few attempts we do
2584			 * this; otherwise we let the timer fire.
2585			 */
2586			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
2587			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2588			sctp_send_initiate(stcb->sctp_ep, stcb);
2589		}
2590		break;
2591	case SCTP_SELECTIVE_ACK:
2592		/* resend the sack */
2593		sctp_send_sack(stcb);
2594		break;
2595	case SCTP_HEARTBEAT_REQUEST:
2596		/* resend a demand HB */
2597		(void)sctp_send_hb(stcb, 1, net);
2598		break;
2599	case SCTP_SHUTDOWN:
2600		sctp_send_shutdown(stcb, net);
2601		break;
2602	case SCTP_SHUTDOWN_ACK:
2603		sctp_send_shutdown_ack(stcb, net);
2604		break;
2605	case SCTP_COOKIE_ECHO:
2606		{
2607			struct sctp_tmit_chunk *cookie;
2608
2609			cookie = NULL;
2610			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
2611			    sctp_next) {
2612				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
2613					break;
2614				}
2615			}
2616			if (cookie) {
2617				if (cookie->sent != SCTP_DATAGRAM_RESEND)
2618					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2619				cookie->sent = SCTP_DATAGRAM_RESEND;
2620				sctp_stop_all_cookie_timers(stcb);
2621			}
2622		}
2623		break;
2624	case SCTP_COOKIE_ACK:
2625		sctp_send_cookie_ack(stcb);
2626		break;
2627	case SCTP_ASCONF_ACK:
2628		/* resend last asconf ack */
2629		sctp_send_asconf_ack(stcb, 1);
2630		break;
2631	case SCTP_FORWARD_CUM_TSN:
2632		send_forward_tsn(stcb, &stcb->asoc);
2633		break;
2634		/* can't do anything with these */
2635	case SCTP_PACKET_DROPPED:
2636	case SCTP_INITIATION_ACK:	/* this should not happen */
2637	case SCTP_HEARTBEAT_ACK:
2638	case SCTP_ABORT_ASSOCIATION:
2639	case SCTP_OPERATION_ERROR:
2640	case SCTP_SHUTDOWN_COMPLETE:
2641	case SCTP_ECN_ECHO:
2642	case SCTP_ECN_CWR:
2643	default:
2644		break;
2645	}
2646	return (0);
2647}
2648
2649void
2650sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2651{
2652	int i;
2653	uint16_t temp;
2654
2655	/*
2656	 * We set things to 0xffff since this is the last delivered sequence
2657	 * and we will be sending in 0 after the reset.
2658	 */
2659
2660	if (number_entries) {
2661		for (i = 0; i < number_entries; i++) {
2662			temp = ntohs(list[i]);
2663			if (temp >= stcb->asoc.streamincnt) {
2664				continue;
2665			}
2666			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
2667		}
2668	} else {
2669		list = NULL;
2670		for (i = 0; i < stcb->asoc.streamincnt; i++) {
2671			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
2672		}
2673	}
2674	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
2675}
2676
2677static void
2678sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2679{
2680	int i;
2681
2682	if (number_entries == 0) {
2683		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2684			stcb->asoc.strmout[i].next_sequence_sent = 0;
2685		}
2686	} else if (number_entries) {
2687		for (i = 0; i < number_entries; i++) {
2688			uint16_t temp;
2689
2690			temp = ntohs(list[i]);
2691			if (temp >= stcb->asoc.streamoutcnt) {
2692				/* no such stream */
2693				continue;
2694			}
2695			stcb->asoc.strmout[temp].next_sequence_sent = 0;
2696		}
2697	}
2698	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
2699}
2700
2701
2702struct sctp_stream_reset_out_request *
2703sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
2704{
2705	struct sctp_association *asoc;
2706	struct sctp_stream_reset_out_req *req;
2707	struct sctp_stream_reset_out_request *r;
2708	struct sctp_tmit_chunk *chk;
2709	int len, clen;
2710
2711	asoc = &stcb->asoc;
2712	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
2713		asoc->stream_reset_outstanding = 0;
2714		return (NULL);
2715	}
2716	if (stcb->asoc.str_reset == NULL) {
2717		asoc->stream_reset_outstanding = 0;
2718		return (NULL);
2719	}
2720	chk = stcb->asoc.str_reset;
2721	if (chk->data == NULL) {
2722		return (NULL);
2723	}
2724	if (bchk) {
2725		/* he wants a copy of the chk pointer */
2726		*bchk = chk;
2727	}
2728	clen = chk->send_size;
2729	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
2730	r = &req->sr_req;
2731	if (ntohl(r->request_seq) == seq) {
2732		/* found it */
2733		return (r);
2734	}
2735	len = SCTP_SIZE32(ntohs(r->ph.param_length));
2736	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
2737		/* move to the next one, there can only be a max of two */
2738		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
2739		if (ntohl(r->request_seq) == seq) {
2740			return (r);
2741		}
2742	}
2743	/* that seq is not here */
2744	return (NULL);
2745}
2746
2747static void
2748sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
2749{
2750	struct sctp_association *asoc;
2751	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
2752
2753	if (stcb->asoc.str_reset == NULL) {
2754		return;
2755	}
2756	asoc = &stcb->asoc;
2757
2758	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
2759	TAILQ_REMOVE(&asoc->control_send_queue,
2760	    chk,
2761	    sctp_next);
2762	if (chk->data) {
2763		sctp_m_freem(chk->data);
2764		chk->data = NULL;
2765	}
2766	asoc->ctrl_queue_cnt--;
2767	sctp_free_remote_addr(chk->whoTo);
2768
2769	sctp_free_a_chunk(stcb, chk);
2770	stcb->asoc.str_reset = NULL;
2771}
2772
2773
2774static int
2775sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
2776    uint32_t seq, uint32_t action,
2777    struct sctp_stream_reset_response *respin)
2778{
2779	uint16_t type;
2780	int lparm_len;
2781	struct sctp_association *asoc = &stcb->asoc;
2782	struct sctp_tmit_chunk *chk;
2783	struct sctp_stream_reset_out_request *srparam;
2784	int number_entries;
2785
2786	if (asoc->stream_reset_outstanding == 0) {
2787		/* duplicate */
2788		return (0);
2789	}
2790	if (seq == stcb->asoc.str_reset_seq_out) {
2791		srparam = sctp_find_stream_reset(stcb, seq, &chk);
2792		if (srparam) {
2793			stcb->asoc.str_reset_seq_out++;
2794			type = ntohs(srparam->ph.param_type);
2795			lparm_len = ntohs(srparam->ph.param_length);
2796			if (type == SCTP_STR_RESET_OUT_REQUEST) {
2797				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
2798				asoc->stream_reset_out_is_outstanding = 0;
2799				if (asoc->stream_reset_outstanding)
2800					asoc->stream_reset_outstanding--;
2801				if (action == SCTP_STREAM_RESET_PERFORMED) {
2802					/* do it */
2803					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
2804				} else {
2805					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
2806				}
2807			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
2808				/* Answered my request */
2809				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
2810				if (asoc->stream_reset_outstanding)
2811					asoc->stream_reset_outstanding--;
2812				if (action != SCTP_STREAM_RESET_PERFORMED) {
2813					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
2814				}
2815			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
2816				/**
2817				 * a) Adopt the new in tsn.
2818				 * b) reset the map
2819				 * c) Adopt the new out-tsn
2820				 */
2821				struct sctp_stream_reset_response_tsn *resp;
2822				struct sctp_forward_tsn_chunk fwdtsn;
2823				int abort_flag = 0;
2824
2825				if (respin == NULL) {
2826					/* huh ? */
2827					return (0);
2828				}
2829				if (action == SCTP_STREAM_RESET_PERFORMED) {
2830					resp = (struct sctp_stream_reset_response_tsn *)respin;
2831					asoc->stream_reset_outstanding--;
2832					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2833					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2834					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
2835					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2836					if (abort_flag) {
2837						return (1);
2838					}
2839					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
2840					stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2841					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
2842					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2843					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
2844					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
2845
2846					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2847					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2848
2849				}
2850			}
2851			/* get rid of the request and get the request flags */
2852			if (asoc->stream_reset_outstanding == 0) {
2853				sctp_clean_up_stream_reset(stcb);
2854			}
2855		}
2856	}
2857	return (0);
2858}
2859
2860static void
2861sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
2862    struct sctp_tmit_chunk *chk,
2863    struct sctp_stream_reset_in_request *req)
2864{
2865	uint32_t seq;
2866	int len, i;
2867	int number_entries;
2868	uint16_t temp;
2869
2870	/*
2871	 * peer wants me to send a str-reset to him for my outgoing seq's if
2872	 * seq_in is right.
2873	 */
2874	struct sctp_association *asoc = &stcb->asoc;
2875
2876	seq = ntohl(req->request_seq);
2877	if (asoc->str_reset_seq_in == seq) {
2878		if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
2879			len = ntohs(req->ph.param_length);
2880			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
2881			for (i = 0; i < number_entries; i++) {
2882				temp = ntohs(req->list_of_streams[i]);
2883				req->list_of_streams[i] = temp;
2884			}
2885			/* move the reset action back one */
2886			asoc->last_reset_action[1] = asoc->last_reset_action[0];
2887			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2888			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
2889			    asoc->str_reset_seq_out,
2890			    seq, (asoc->sending_seq - 1));
2891			asoc->stream_reset_out_is_outstanding = 1;
2892			asoc->str_reset = chk;
2893			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
2894			stcb->asoc.stream_reset_outstanding++;
2895		} else {
2896			/* Can't do it, since we have sent one out */
2897			asoc->last_reset_action[1] = asoc->last_reset_action[0];
2898			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
2899			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2900		}
2901		asoc->str_reset_seq_in++;
2902	} else if (asoc->str_reset_seq_in - 1 == seq) {
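		/*
		 * one seq back: just echo back the last action, since my
		 * previous response was evidently lost
		 */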
2903		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2904	} else if (asoc->str_reset_seq_in - 2 == seq) {
2905		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
2906	} else {
2907		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2908	}
2909}
2910
2911static int
2912sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
2913    struct sctp_tmit_chunk *chk,
2914    struct sctp_stream_reset_tsn_request *req)
2915{
2916	/* reset all in and out and update the tsn */
2917	/*
2918	 * A) reset my str-seq's on in and out. B) Select a receive next,
2919	 * and set cum-ack to it. Also process this selected number as a
2920	 * fwd-tsn as well. C) set in the response my next sending seq.
2921	 */
2922	struct sctp_forward_tsn_chunk fwdtsn;
2923	struct sctp_association *asoc = &stcb->asoc;
2924	int abort_flag = 0;
2925	uint32_t seq;
2926
2927	seq = ntohl(req->request_seq);
2928	if (asoc->str_reset_seq_in == seq) {
2929		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2930		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2931		fwdtsn.ch.chunk_flags = 0;
2932		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
2933		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2934		if (abort_flag) {
2935			return (1);
2936		}
2937		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
2938		stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2939		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
2940		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2941		atomic_add_int(&stcb->asoc.sending_seq, 1);
2942		/* save off historical data for retrans */
2943		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
2944		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
2945		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
2946		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
2947
2948		sctp_add_stream_reset_result_tsn(chk,
2949		    ntohl(req->request_seq),
2950		    SCTP_STREAM_RESET_PERFORMED,
2951		    stcb->asoc.sending_seq,
2952		    stcb->asoc.mapping_array_base_tsn);
2953		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2954		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2955		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
2956		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2957
2958		asoc->str_reset_seq_in++;
2959	} else if (asoc->str_reset_seq_in - 1 == seq) {
2960		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
2961		    stcb->asoc.last_sending_seq[0],
2962		    stcb->asoc.last_base_tsnsent[0]
2963		    );
2964	} else if (asoc->str_reset_seq_in - 2 == seq) {
2965		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
2966		    stcb->asoc.last_sending_seq[1],
2967		    stcb->asoc.last_base_tsnsent[1]
2968		    );
2969	} else {
2970		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2971	}
2972	return (0);
2973}
2974
2975static void
2976sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
2977    struct sctp_tmit_chunk *chk,
2978    struct sctp_stream_reset_out_request *req)
2979{
2980	uint32_t seq, tsn;
2981	int number_entries, len;
2982	struct sctp_association *asoc = &stcb->asoc;
2983
2984	seq = ntohl(req->request_seq);
2985
2986	/* now if it's not a duplicate we process it */
2987	if (asoc->str_reset_seq_in == seq) {
2988		len = ntohs(req->ph.param_length);
2989		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
2990		/*
2991		 * the sender is resetting; handle the list issue: we must
2992		 * a) verify if we can do the reset (if so, no problem),
2993		 * b) if we can't do the reset we must copy the request,
2994		 * c) queue it, and set up the data-in processor to trigger it
2995		 * off when needed and dequeue all the queued data.
2996		 */
2997		tsn = ntohl(req->send_reset_at_tsn);
2998
2999		/* move the reset action back one */
3000		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3001		if ((tsn == asoc->cumulative_tsn) ||
3002		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3003			/* we can do it now */
3004			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3005			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3006			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3007		} else {
3008			/*
3009			 * we must queue it up and thus wait for the TSN's
3010			 * to arrive that are at or before tsn
3011			 */
3012			struct sctp_stream_reset_list *liste;
3013			int siz;
3014
3015			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3016			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3017			    siz, SCTP_M_STRESET);
3018			if (liste == NULL) {
3019				/* gak out of memory */
3020				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3021				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3022				return;
3023			}
3024			liste->tsn = tsn;
3025			liste->number_entries = number_entries;
3026			memcpy(&liste->req, req,
3027			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3028			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3029			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3030			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3031		}
3032		asoc->str_reset_seq_in++;
3033	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3034		/*
3035		 * one seq back, just echo back last action since my
3036		 * response was lost.
3037		 */
3038		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3039	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3040		/*
3041		 * two seq back, just echo back last action since my
3042		 * response was lost.
3043		 */
3044		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3045	} else {
3046		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3047	}
3048}
3049
3050static int
3051sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req)
3052{
3053	int chk_length, param_len, ptype;
3054	uint32_t seq;
3055	int num_req = 0;
3056	struct sctp_tmit_chunk *chk;
3057	struct sctp_chunkhdr *ch;
3058	struct sctp_paramhdr *ph;
3059	int ret_code = 0;
3060	int num_param = 0;
3061
3062	/* now it may be a reset or a reset-response */
3063	chk_length = ntohs(sr_req->ch.chunk_length);
3064
3065	/* setup for adding the response */
3066	sctp_alloc_a_chunk(stcb, chk);
3067	if (chk == NULL) {
3068		return (ret_code);
3069	}
3070	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3071	chk->rec.chunk_id.can_take_data = 0;
3072	chk->asoc = &stcb->asoc;
3073	chk->no_fr_allowed = 0;
3074	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3075	chk->book_size_scale = 0;
3076	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3077	if (chk->data == NULL) {
3078strres_nochunk:
3079		if (chk->data) {
3080			sctp_m_freem(chk->data);
3081			chk->data = NULL;
3082		}
3083		sctp_free_a_chunk(stcb, chk);
3084		return (ret_code);
3085	}
3086	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3087
3088	/* setup chunk parameters */
3089	chk->sent = SCTP_DATAGRAM_UNSENT;
3090	chk->snd_count = 0;
3091	chk->whoTo = stcb->asoc.primary_destination;
3092	atomic_add_int(&chk->whoTo->ref_count, 1);
3093
3094	ch = mtod(chk->data, struct sctp_chunkhdr *);
3095	ch->chunk_type = SCTP_STREAM_RESET;
3096	ch->chunk_flags = 0;
3097	ch->chunk_length = htons(chk->send_size);
3098	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
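	/*
	 * The response chunk starts out as a bare STREAM-RESET chunk header;
	 * each request handled below appends its result parameter(s) to it.
	 */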
3099	ph = (struct sctp_paramhdr *)&sr_req->sr_req;
3100	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3101		param_len = ntohs(ph->param_length);
3102		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3103			/* bad param */
3104			break;
3105		}
3106		ptype = ntohs(ph->param_type);
3107		num_param++;
3108		if (num_param > SCTP_MAX_RESET_PARAMS) {
3109			/* hit the max number of parameters already, sorry.. */
3110			break;
3111		}
3112		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3113			struct sctp_stream_reset_out_request *req_out;
3114
3115			req_out = (struct sctp_stream_reset_out_request *)ph;
3116			num_req++;
3117			if (stcb->asoc.stream_reset_outstanding) {
3118				seq = ntohl(req_out->response_seq);
3119				if (seq == stcb->asoc.str_reset_seq_out) {
3120					/* implicit ack */
3121					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3122				}
3123			}
3124			sctp_handle_str_reset_request_out(stcb, chk, req_out);
3125		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3126			struct sctp_stream_reset_in_request *req_in;
3127
3128			num_req++;
3129			req_in = (struct sctp_stream_reset_in_request *)ph;
3130			sctp_handle_str_reset_request_in(stcb, chk, req_in);
3131		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3132			struct sctp_stream_reset_tsn_request *req_tsn;
3133
3134			num_req++;
3135			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3136			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3137				ret_code = 1;
3138				goto strres_nochunk;
3139			}
3140			/* no more */
3141			break;
3142		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3143			struct sctp_stream_reset_response *resp;
3144			uint32_t result;
3145
3146			resp = (struct sctp_stream_reset_response *)ph;
3147			seq = ntohl(resp->response_seq);
3148			result = ntohl(resp->result);
3149			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3150				ret_code = 1;
3151				goto strres_nochunk;
3152			}
3153		} else {
3154			break;
3155		}
3156
3157		ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len));
3158		chk_length -= SCTP_SIZE32(param_len);
3159	}
3160	if (num_req == 0) {
3161		/* we have no response, free the stuff */
3162		goto strres_nochunk;
3163	}
3164	/* ok we have a chunk to link in */
3165	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3166	    chk,
3167	    sctp_next);
3168	stcb->asoc.ctrl_queue_cnt++;
3169	return (ret_code);
3170}
3171
3172/*
3173 * Handle a router's or endpoint's report of a packet loss. There are two
3174 * ways to handle this: either we get the whole packet and must dissect it
3175 * ourselves (possibly with truncation and/or corruption), or it is a summary
3176 * from a middle box that did the dissecting for us.
3177 */
3178static void
3179sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3180    struct sctp_tcb *stcb, struct sctp_nets *net)
3181{
3182	uint32_t bottle_bw, on_queue;
3183	uint16_t trunc_len;
3184	unsigned int chlen;
3185	unsigned int at;
3186	struct sctp_chunk_desc desc;
3187	struct sctp_chunkhdr *ch;
3188
3189	chlen = ntohs(cp->ch.chunk_length);
3190	chlen -= sizeof(struct sctp_pktdrop_chunk);
3191	/* XXX possible chlen underflow */
3192	if (chlen == 0) {
3193		ch = NULL;
3194		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3195			SCTP_STAT_INCR(sctps_pdrpbwrpt);
3196	} else {
3197		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3198		chlen -= sizeof(struct sctphdr);
3199		/* XXX possible chlen underflow */
3200		memset(&desc, 0, sizeof(desc));
3201	}
3202	trunc_len = (uint16_t) ntohs(cp->trunc_len);
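	/*
	 * A trunc_len of 0 means the reporter included the entire dropped
	 * packet; otherwise only the first trunc_len bytes were kept, so for
	 * each chunk we can only count on its header (plus a few data bytes
	 * for DATA chunks) being present.
	 */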
3203	/* now the chunks themselves */
3204	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3205		desc.chunk_type = ch->chunk_type;
3206		/* get amount we need to move */
3207		at = ntohs(ch->chunk_length);
3208		if (at < sizeof(struct sctp_chunkhdr)) {
3209			/* corrupt chunk, maybe at the end? */
3210			SCTP_STAT_INCR(sctps_pdrpcrupt);
3211			break;
3212		}
3213		if (trunc_len == 0) {
3214			/* we are supposed to have all of it */
3215			if (at > chlen) {
3216				/* corrupt, skip it */
3217				SCTP_STAT_INCR(sctps_pdrpcrupt);
3218				break;
3219			}
3220		} else {
3221			/* is there enough of it left? */
3222			if (desc.chunk_type == SCTP_DATA) {
3223				if (chlen < (sizeof(struct sctp_data_chunk) +
3224				    sizeof(desc.data_bytes))) {
3225					break;
3226				}
3227			} else {
3228				if (chlen < sizeof(struct sctp_chunkhdr)) {
3229					break;
3230				}
3231			}
3232		}
3233		if (desc.chunk_type == SCTP_DATA) {
3234			/* can we get out the tsn? */
3235			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3236				SCTP_STAT_INCR(sctps_pdrpmbda);
3237
3238			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3239				/* yep */
3240				struct sctp_data_chunk *dcp;
3241				uint8_t *ddp;
3242				unsigned int iii;
3243
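				/*
				 * Save the TSN and the first few payload
				 * bytes of the dropped DATA chunk; the
				 * descriptor is used below to identify which
				 * chunk of ours was dropped.
				 */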
3244				dcp = (struct sctp_data_chunk *)ch;
3245				ddp = (uint8_t *) (dcp + 1);
3246				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3247					desc.data_bytes[iii] = ddp[iii];
3248				}
3249				desc.tsn_ifany = dcp->dp.tsn;
3250			} else {
3251				/* nope we are done. */
3252				SCTP_STAT_INCR(sctps_pdrpnedat);
3253				break;
3254			}
3255		} else {
3256			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3257				SCTP_STAT_INCR(sctps_pdrpmbct);
3258		}
3259
3260		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3261			SCTP_STAT_INCR(sctps_pdrppdbrk);
3262			break;
3263		}
3264		if (SCTP_SIZE32(at) > chlen) {
3265			break;
3266		}
3267		chlen -= SCTP_SIZE32(at);
3268		if (chlen < sizeof(struct sctp_chunkhdr)) {
3269			/* done, none left */
3270			break;
3271		}
3272		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3273	}
3274	/* Now update any rwnd --- possibly */
3275	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3276		/* From a peer, we get a rwnd report */
3277		uint32_t a_rwnd;
3278
3279		SCTP_STAT_INCR(sctps_pdrpfehos);
3280
3281		bottle_bw = ntohl(cp->bottle_bw);
3282		on_queue = ntohl(cp->current_onq);
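		/*
		 * Illustrative example (numbers not from the source): with
		 * bottle_bw = 100000 and current_onq = 60000 the implied
		 * receiver window is 40000 bytes; after subtracting a
		 * total_flight of, say, 30000 the usable peers_rwnd is 10000,
		 * and it is clamped to 0 if that falls below the sender-side
		 * SWS threshold checked below.
		 */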
3283		if (bottle_bw && on_queue) {
3284			/* a rwnd report is in here */
3285			if (bottle_bw > on_queue)
3286				a_rwnd = bottle_bw - on_queue;
3287			else
3288				a_rwnd = 0;
3289
3290			if (a_rwnd == 0)
3291				stcb->asoc.peers_rwnd = 0;
3292			else {
3293				if (a_rwnd > stcb->asoc.total_flight) {
3294					stcb->asoc.peers_rwnd =
3295					    a_rwnd - stcb->asoc.total_flight;
3296				} else {
3297					stcb->asoc.peers_rwnd = 0;
3298				}
3299				if (stcb->asoc.peers_rwnd <
3300				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3301					/* SWS sender side engages */
3302					stcb->asoc.peers_rwnd = 0;
3303				}
3304			}
3305		}
3306	} else {
3307		SCTP_STAT_INCR(sctps_pdrpfmbox);
3308	}
3309
3310	/* now middle boxes in sat networks get a cwnd bump */
3311	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3312	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
3313	    (stcb->asoc.sat_network)) {
3314		/*
3315		 * This is debatable, but for sat networks it makes sense.
3316		 * Note that if a T3 timer has gone off, we will prohibit any
3317		 * changes to cwnd until we exit the t3 loss recovery.
3318		 */
3319		uint32_t bw_avail;
3320		int rtt, incr;
3321
3322		int old_cwnd = net->cwnd;
3323
3324		/* need real RTT for this calc */
3325		rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
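		/*
		 * This derives an RTT estimate from the smoothed RTT and
		 * variance state that the RTO machinery keeps for this net.
		 */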
3326		/* get bottle neck bw */
3327		bottle_bw = ntohl(cp->bottle_bw);
3328		/* and what's on the queue */
3329		on_queue = ntohl(cp->current_onq);
3330		/*
3331		 * Adjust the on-queue figure if our flight size is larger; the
3332		 * router may not yet have seen the data we count as "in-flight".
3333		 */
3334		if (on_queue < net->flight_size)
3335			on_queue = net->flight_size;
3336
3337		/* calculate the available space */
3338		bw_avail = (bottle_bw * rtt) / 1000;
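		/*
		 * bw_avail is effectively the bandwidth-delay product,
		 * assuming bottle_bw is in bytes/sec and rtt in milliseconds
		 * (hence the /1000). Illustrative numbers: 1000000 bytes/sec
		 * with a 200 ms RTT gives 200000 bytes of pipe.
		 */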
3339		if (bw_avail > bottle_bw) {
3340			/*
3341			 * Cap the growth to no more than the bottleneck.
3342			 * This can happen as the RTT slides up due to queues.
3343			 * It also means that with more than a 1-second RTT
3344			 * and an empty queue you will be limited to
3345			 * bottle_bw per second, even though other paths with
3346			 * half the RTT could get more out...
3347			 */
3348			bw_avail = bottle_bw;
3349		}
3350		if (on_queue > bw_avail) {
3351			/*
3352			 * No room for anything else; don't allow anything
3353			 * else to be "added to the fire".
3354			 */
3355			int seg_inflight, seg_onqueue, my_portion;
3356
3357			net->partial_bytes_acked = 0;
3358
3359			/* how much are we over queue size? */
3360			incr = on_queue - bw_avail;
3361			if (stcb->asoc.seen_a_sack_this_pkt) {
3362				/*
3363				 * undo any cwnd adjustment that the sack
3364				 * might have made
3365				 */
3366				net->cwnd = net->prev_cwnd;
3367			}
3368			/* Now how much of that is mine? */
3369			seg_inflight = net->flight_size / net->mtu;
3370			seg_onqueue = on_queue / net->mtu;
3371			my_portion = (incr * seg_inflight) / seg_onqueue;
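			/*
			 * Proportional share, e.g. (illustrative numbers): if
			 * we are 8000 bytes over (incr), have 4 segments in
			 * flight, and the router reports 16 segments queued,
			 * our portion of the overage is 8000 * 4 / 16 = 2000
			 * bytes.
			 */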
3372
3373			/* Have I made an adjustment already? */
3374			if (net->cwnd > net->flight_size) {
3375				/*
3376				 * For this flight I already made an adjustment;
3377				 * we need to decrease the portion by a share of
3378				 * our previous adjustment.
3379				 */
3380				int diff_adj;
3381
3382				diff_adj = net->cwnd - net->flight_size;
3383				if (diff_adj > my_portion)
3384					my_portion = 0;
3385				else
3386					my_portion -= diff_adj;
3387			}
3388			/*
3389			 * Back down to the previous cwnd (assuming we have
3390			 * had a sack before this packet), minus whatever
3391			 * portion of the overage is my fault.
3392			 */
3393			net->cwnd -= my_portion;
3394
3395			/* we will NOT back down more than 1 MTU */
3396			if (net->cwnd <= net->mtu) {
3397				net->cwnd = net->mtu;
3398			}
3399			/* force into CA */
3400			net->ssthresh = net->cwnd - 1;
3401		} else {
3402			/*
3403			 * Take 1/4 of the space left, or max_burst MTUs
3404			 * worth, whichever is less.
3405			 */
3406			incr = min((bw_avail - on_queue) >> 2,
3407			    stcb->asoc.max_burst * net->mtu);
3408			net->cwnd += incr;
3409		}
3410		if (net->cwnd > bw_avail) {
3411			/* We can't exceed the pipe size */
3412			net->cwnd = bw_avail;
3413		}
3414		if (net->cwnd < net->mtu) {
3415			/* We always have 1 MTU */
3416			net->cwnd = net->mtu;
3417		}
3418		if (net->cwnd - old_cwnd != 0) {
3419			/* log only changes */
3420			if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
3421				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
3422				    SCTP_CWND_LOG_FROM_SAT);
3423			}
3424		}
3425	}
3426}
3427
3428/*
3429 * Handles all control chunks in a packet.
3430 * Inputs:  m - mbuf chain, assumed to still contain the IP/SCTP header;
3431 *          stcb - the tcb found for this packet; offset - offset into the
3432 *          mbuf chain to the first chunkhdr; length - complete packet length.
3433 * Outputs: length - modified to remaining length after control processing;
3434 *          netp - modified to new sctp_nets after cookie-echo processing.
3435 * Returns NULL to discard the packet (no asoc, bad packet, ...), else the tcb.
3436 */
3437#ifdef __GNUC__
3438__attribute__((noinline))
3439#endif
3440static struct sctp_tcb *
3441sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3442    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3443    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3444    uint32_t vrf_id)
3445{
3446	struct sctp_association *asoc;
3447	uint32_t vtag_in;
3448	int num_chunks = 0;	/* number of control chunks processed */
3449	uint32_t chk_length;
3450	int ret;
3451	int abort_no_unlock = 0;
3452
3453	/*
3454	 * How big should this be, and should it be alloc'd? Let's try the
3455	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3456	 * until we get into jumbograms and such..
3457	 */
3458	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3459	struct sctp_tcb *locked_tcb = stcb;
3460	int got_auth = 0;
3461	uint32_t auth_offset = 0, auth_len = 0;
3462	int auth_skipped = 0;
3463
3464	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3465	    iphlen, *offset, length, stcb);
3466
3467	/* validate chunk header length... */
3468	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3469		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3470		    ntohs(ch->chunk_length));
3471		if (locked_tcb) {
3472			SCTP_TCB_UNLOCK(locked_tcb);
3473		}
3474		return (NULL);
3475	}
3476	/*
3477	 * validate the verification tag
3478	 */
3479	vtag_in = ntohl(sh->v_tag);
3480
3481	if (locked_tcb) {
3482		SCTP_TCB_LOCK_ASSERT(locked_tcb);
3483	}
3484	if (ch->chunk_type == SCTP_INITIATION) {
3485		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
3486		    ntohs(ch->chunk_length), vtag_in);
3487		if (vtag_in != 0) {
3488			/* protocol error- silently discard... */
3489			SCTP_STAT_INCR(sctps_badvtag);
3490			if (locked_tcb) {
3491				SCTP_TCB_UNLOCK(locked_tcb);
3492			}
3493			return (NULL);
3494		}
3495	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3496		/*
3497		 * If there is no stcb, skip the AUTH chunk and process it
3498		 * later, after a stcb is found (to validate that the lookup
3499		 * was valid).
3500		 */
3501		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3502		    (stcb == NULL) && !sctp_auth_disable) {
3503			/* save this chunk for later processing */
3504			auth_skipped = 1;
3505			auth_offset = *offset;
3506			auth_len = ntohs(ch->chunk_length);
3507
3508			/* (temporarily) move past this chunk */
3509			*offset += SCTP_SIZE32(auth_len);
3510			if (*offset >= length) {
3511				/* no more data left in the mbuf chain */
3512				*offset = length;
3513				if (locked_tcb) {
3514					SCTP_TCB_UNLOCK(locked_tcb);
3515				}
3516				return (NULL);
3517			}
3518			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3519			    sizeof(struct sctp_chunkhdr), chunk_buf);
3520		}
3521		if (ch == NULL) {
3522			/* Help */
3523			*offset = length;
3524			if (locked_tcb) {
3525				SCTP_TCB_UNLOCK(locked_tcb);
3526			}
3527			return (NULL);
3528		}
3529		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3530			goto process_control_chunks;
3531		}
3532		/*
3533		 * First check if it's an ASCONF with an unknown src addr;
3534		 * we need to look inside it to find the association.
3535		 */
3536		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3537			/* inp's refcount may be reduced */
3538			SCTP_INP_INCR_REF(inp);
3539
3540			stcb = sctp_findassociation_ep_asconf(m, iphlen,
3541			    *offset, sh, &inp, netp);
3542			if (stcb == NULL) {
3543				/*
3544				 * reduce inp's refcount if not reduced in
3545				 * sctp_findassociation_ep_asconf().
3546				 */
3547				SCTP_INP_DECR_REF(inp);
3548			}
3549			/* now go back and verify any auth chunk to be sure */
3550			if (auth_skipped && (stcb != NULL)) {
3551				struct sctp_auth_chunk *auth;
3552
3553				auth = (struct sctp_auth_chunk *)
3554				    sctp_m_getptr(m, auth_offset,
3555				    auth_len, chunk_buf);
3556				got_auth = 1;
3557				auth_skipped = 0;
3558				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3559				    auth_offset)) {
3560					/* auth HMAC failed so dump it */
3561					*offset = length;
3562					if (locked_tcb) {
3563						SCTP_TCB_UNLOCK(locked_tcb);
3564					}
3565					return (NULL);
3566				} else {
3567					/* remaining chunks are HMAC checked */
3568					stcb->asoc.authenticated = 1;
3569				}
3570			}
3571		}
3572		if (stcb == NULL) {
3573			/* no association, so it's out of the blue... */
3574			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3575			    vrf_id);
3576			*offset = length;
3577			if (locked_tcb) {
3578				SCTP_TCB_UNLOCK(locked_tcb);
3579			}
3580			return (NULL);
3581		}
3582		asoc = &stcb->asoc;
3583		/* ABORT and SHUTDOWN can use either v_tag... */
3584		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3585		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3586		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
3587			if ((vtag_in == asoc->my_vtag) ||
3588			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
3589			    (vtag_in == asoc->peer_vtag))) {
3590				/* this is valid */
3591			} else {
3592				/* drop this packet... */
3593				SCTP_STAT_INCR(sctps_badvtag);
3594				if (locked_tcb) {
3595					SCTP_TCB_UNLOCK(locked_tcb);
3596				}
3597				return (NULL);
3598			}
3599		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
3600			if (vtag_in != asoc->my_vtag) {
3601				/*
3602				 * this could be a stale SHUTDOWN-ACK or the
3603				 * peer never got the SHUTDOWN-COMPLETE and
3604				 * is still hung; we have started a new asoc
3605				 * but it won't complete until the shutdown
3606				 * is completed
3607				 */
3608				if (locked_tcb) {
3609					SCTP_TCB_UNLOCK(locked_tcb);
3610				}
3611				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
3612				    NULL, vrf_id);
3613				return (NULL);
3614			}
3615		} else {
3616			/* for all other chunks, vtag must match */
3617			if (vtag_in != asoc->my_vtag) {
3618				/* invalid vtag... */
3619				SCTPDBG(SCTP_DEBUG_INPUT3,
3620				    "invalid vtag: %xh, expect %xh\n",
3621				    vtag_in, asoc->my_vtag);
3622				SCTP_STAT_INCR(sctps_badvtag);
3623				if (locked_tcb) {
3624					SCTP_TCB_UNLOCK(locked_tcb);
3625				}
3626				*offset = length;
3627				return (NULL);
3628			}
3629		}
3630	}			/* end if !SCTP_COOKIE_ECHO */
3631	/*
3632	 * process all control chunks...
3633	 */
3634	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
3635	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
3636	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
3637		/* implied cookie-ack.. we must have lost the ack */
3638		stcb->asoc.overall_error_count = 0;
3639		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
3640		    *netp);
3641	}
3642process_control_chunks:
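	/*
	 * Main dispatch loop: validate each control chunk's length, pull the
	 * chunk into contiguous storage, and switch on its type below.
	 */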
3643	while (IS_SCTP_CONTROL(ch)) {
3644		/* validate chunk length */
3645		chk_length = ntohs(ch->chunk_length);
3646		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
3647		    ch->chunk_type, chk_length);
3648		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
3649		if (chk_length < sizeof(*ch) ||
3650		    (*offset + (int)chk_length) > length) {
3651			*offset = length;
3652			if (locked_tcb) {
3653				SCTP_TCB_UNLOCK(locked_tcb);
3654			}
3655			return (NULL);
3656		}
3657		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
3658		/*
3659		 * INIT-ACK gets only the init-ack "header" portion,
3660		 * because we don't have to process the peer's COOKIE. All
3661		 * others get a complete chunk.
3662		 */
3663		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
3664		    (ch->chunk_type == SCTP_INITIATION)) {
3665			/* get an init-ack chunk */
3666			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3667			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
3668			if (ch == NULL) {
3669				*offset = length;
3670				if (locked_tcb) {
3671					SCTP_TCB_UNLOCK(locked_tcb);
3672				}
3673				return (NULL);
3674			}
3675		} else {
3676			/*
3677			 * For cookies and all other chunks.
3678			 */
3679			if (chk_length > sizeof(chunk_buf)) {
3680				/*
3681				 * Use just the size of the chunk buffer, so
3682				 * the front part of our chunks fits in
3683				 * contiguous space up to the chunk buffer
3684				 * size (508 bytes). Chunks that need more
3685				 * than that must use the sctp_m_getptr()
3686				 * function or other means (and know how to
3687				 * parse mbuf chains). Cookies do this
3688				 * already.
3689				 */
3690				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3691				    (sizeof(chunk_buf) - 4),
3692				    chunk_buf);
3693				if (ch == NULL) {
3694					*offset = length;
3695					if (locked_tcb) {
3696						SCTP_TCB_UNLOCK(locked_tcb);
3697					}
3698					return (NULL);
3699				}
3700			} else {
3701				/* We can fit it all */
3702				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3703				    chk_length, chunk_buf);
3704				if (ch == NULL) {
3705					SCTP_PRINTF("sctp_process_control: Can't get all the data....\n");
3706					*offset = length;
3707					if (locked_tcb) {
3708						SCTP_TCB_UNLOCK(locked_tcb);
3709					}
3710					return (NULL);
3711				}
3712			}
3713		}
3714		num_chunks++;
3715		/* Save off the last place we got a control from */
3716		if (stcb != NULL) {
3717			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
3718				/*
3719				 * allow last_control to be NULL if
3720				 * ASCONF... ASCONF processing will find the
3721				 * right net later
3722				 */
3723				if ((netp != NULL) && (*netp != NULL))
3724					stcb->asoc.last_control_chunk_from = *netp;
3725			}
3726		}
3727#ifdef SCTP_AUDITING_ENABLED
3728		sctp_audit_log(0xB0, ch->chunk_type);
3729#endif
3730
3731		/* check to see if this chunk required auth, but isn't */
3732		if ((stcb != NULL) && !sctp_auth_disable &&
3733		    sctp_auth_is_required_chunk(ch->chunk_type,
3734		    stcb->asoc.local_auth_chunks) &&
3735		    !stcb->asoc.authenticated) {
3736			/* "silently" ignore */
3737			SCTP_STAT_INCR(sctps_recvauthmissing);
3738			goto next_chunk;
3739		}
3740		switch (ch->chunk_type) {
3741		case SCTP_INITIATION:
3742			/* must be first and only chunk */
3743			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
3744			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3745				/* We are not interested anymore? */
3746				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3747					/*
3748					 * collision case where we are
3749					 * sending to them too
3750					 */
3751					;
3752				} else {
3753					if (locked_tcb) {
3754						SCTP_TCB_UNLOCK(locked_tcb);
3755					}
3756					*offset = length;
3757					return (NULL);
3758				}
3759			}
3760			if ((num_chunks > 1) ||
3761			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3762				*offset = length;
3763				if (locked_tcb) {
3764					SCTP_TCB_UNLOCK(locked_tcb);
3765				}
3766				return (NULL);
3767			}
3768			if ((stcb != NULL) &&
3769			    (SCTP_GET_STATE(&stcb->asoc) ==
3770			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
3771				sctp_send_shutdown_ack(stcb,
3772				    stcb->asoc.primary_destination);
3773				*offset = length;
3774				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3775				if (locked_tcb) {
3776					SCTP_TCB_UNLOCK(locked_tcb);
3777				}
3778				return (NULL);
3779			}
3780			if (netp) {
3781				sctp_handle_init(m, iphlen, *offset, sh,
3782				    (struct sctp_init_chunk *)ch, inp,
3783				    stcb, *netp, &abort_no_unlock, vrf_id);
3784			}
3785			if (abort_no_unlock)
3786				return (NULL);
3787
3788			*offset = length;
3789			if (locked_tcb) {
3790				SCTP_TCB_UNLOCK(locked_tcb);
3791			}
3792			return (NULL);
3793			break;
3794		case SCTP_PAD_CHUNK:
3795			break;
3796		case SCTP_INITIATION_ACK:
3797			/* must be first and only chunk */
3798			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
3799			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3800				/* We are not interested anymore */
3801				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3802					;
3803				} else {
3804					if (locked_tcb) {
3805						SCTP_TCB_UNLOCK(locked_tcb);
3806					}
3807					*offset = length;
3808					if (stcb) {
3809						sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3810					}
3811					return (NULL);
3812				}
3813			}
3814			if ((num_chunks > 1) ||
3815			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3816				*offset = length;
3817				if (locked_tcb) {
3818					SCTP_TCB_UNLOCK(locked_tcb);
3819				}
3820				return (NULL);
3821			}
3822			if ((netp) && (*netp)) {
3823				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
3824				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
3825			} else {
3826				ret = -1;
3827			}
3828			/*
3829			 * Special case, I must call the output routine to
3830			 * get the cookie echoed
3831			 */
3832			if (abort_no_unlock)
3833				return (NULL);
3834
3835			if ((stcb) && ret == 0)
3836				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3837			*offset = length;
3838			if (locked_tcb) {
3839				SCTP_TCB_UNLOCK(locked_tcb);
3840			}
3841			return (NULL);
3842			break;
3843		case SCTP_SELECTIVE_ACK:
3844			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
3845			SCTP_STAT_INCR(sctps_recvsacks);
3846			{
3847				struct sctp_sack_chunk *sack;
3848				int abort_now = 0;
3849				uint32_t a_rwnd, cum_ack;
3850				uint16_t num_seg;
3851				int nonce_sum_flag;
3852
3853				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
3854					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
3855					*offset = length;
3856					if (locked_tcb) {
3857						SCTP_TCB_UNLOCK(locked_tcb);
3858					}
3859					return (NULL);
3860				}
3861				sack = (struct sctp_sack_chunk *)ch;
3862				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
3863				cum_ack = ntohl(sack->sack.cum_tsn_ack);
3864				num_seg = ntohs(sack->sack.num_gap_ack_blks);
3865				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
3866				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
3867				    cum_ack,
3868				    num_seg,
3869				    a_rwnd
3870				    );
3871				stcb->asoc.seen_a_sack_this_pkt = 1;
3872				if ((stcb->asoc.pr_sctp_cnt == 0) &&
3873				    (num_seg == 0) &&
3874				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
3875				    (cum_ack == stcb->asoc.last_acked_seq)) &&
3876				    (stcb->asoc.saw_sack_with_frags == 0) &&
3877				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
3878				    ) {
3879					/*
3880					 * We have a SIMPLE sack: no gap-ack
3881					 * segments and data on the sent
3882					 * queue to be acked. Use the
3883					 * faster-path sack processing. We
3884					 * also allow window-update sacks
3885					 * with no missing segments to go
3886					 * this way too.
3887					 */
3888					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
3889					    &abort_now);
3890				} else {
3891					if (netp && *netp)
3892						sctp_handle_sack(sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
3893				}
3894				if (abort_now) {
3895					/* ABORT signal from sack processing */
3896					*offset = length;
3897					return (NULL);
3898				}
3899			}
3900			break;
3901		case SCTP_HEARTBEAT_REQUEST:
3902			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
3903			if ((stcb) && netp && *netp) {
3904				SCTP_STAT_INCR(sctps_recvheartbeat);
3905				sctp_send_heartbeat_ack(stcb, m, *offset,
3906				    chk_length, *netp);
3907
3908				/* He's alive so give him credit */
3909				stcb->asoc.overall_error_count = 0;
3910			}
3911			break;
3912		case SCTP_HEARTBEAT_ACK:
3913			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
3914			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
3915				/* It's not ours */
3916				*offset = length;
3917				if (locked_tcb) {
3918					SCTP_TCB_UNLOCK(locked_tcb);
3919				}
3920				return (NULL);
3921			}
3922			/* He's alive so give him credit */
3923			stcb->asoc.overall_error_count = 0;
3924			SCTP_STAT_INCR(sctps_recvheartbeatack);
3925			if (netp && *netp)
3926				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
3927				    stcb, *netp);
3928			break;
3929		case SCTP_ABORT_ASSOCIATION:
3930			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
3931			    stcb);
3932			if ((stcb) && netp && *netp)
3933				sctp_handle_abort((struct sctp_abort_chunk *)ch,
3934				    stcb, *netp);
3935			*offset = length;
3936			return (NULL);
3937			break;
3938		case SCTP_SHUTDOWN:
3939			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
3940			    stcb);
3941			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
3942				*offset = length;
3943				if (locked_tcb) {
3944					SCTP_TCB_UNLOCK(locked_tcb);
3945				}
3946				return (NULL);
3947
3948			}
3949			if (netp && *netp) {
3950				int abort_flag = 0;
3951
3952				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
3953				    stcb, *netp, &abort_flag);
3954				if (abort_flag) {
3955					*offset = length;
3956					return (NULL);
3957				}
3958			}
3959			break;
3960		case SCTP_SHUTDOWN_ACK:
3961			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
3962			if ((stcb) && (netp) && (*netp))
3963				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
3964			*offset = length;
3965			return (NULL);
3966			break;
3967
3968		case SCTP_OPERATION_ERROR:
3969			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
3970			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
3971
3972				*offset = length;
3973				return (NULL);
3974			}
3975			break;
3976		case SCTP_COOKIE_ECHO:
3977			SCTPDBG(SCTP_DEBUG_INPUT3,
3978			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
3979			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3980				;
3981			} else {
3982				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3983					/* We are not interested anymore */
3984					*offset = length;
3985					return (NULL);
3986				}
3987			}
3988			/*
3989			 * First, are we accepting? We do this again here
3990			 * since it is possible that a previous endpoint
3991			 * WAS listening, responded to an INIT-ACK, and then
3992			 * closed. We opened and bound.. and are now no
3993			 * longer listening.
3994			 */
3995			if (inp->sctp_socket->so_qlimit == 0) {
3996				if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3997					/*
3998					 * Special case: is this a retran'd
3999					 * COOKIE-ECHO or a restarting assoc
4000					 * on a peeled-off or one-to-one
4001					 * style socket?
4002					 */
4003					goto process_cookie_anyway;
4004				}
4005				sctp_abort_association(inp, stcb, m, iphlen,
4006				    sh, NULL, vrf_id);
4007				*offset = length;
4008				return (NULL);
4009			} else if (inp->sctp_socket->so_qlimit) {
4010				/* we are accepting so check limits like TCP */
4011				if (inp->sctp_socket->so_qlen >
4012				    inp->sctp_socket->so_qlimit) {
4013					/* no space */
4014					struct mbuf *oper;
4015					struct sctp_paramhdr *phdr;
4016
4017					if (sctp_abort_if_one_2_one_hits_limit) {
4018						oper = NULL;
4019						oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4020						    0, M_DONTWAIT, 1, MT_DATA);
4021						if (oper) {
4022							SCTP_BUF_LEN(oper) =
4023							    sizeof(struct sctp_paramhdr);
4024							phdr = mtod(oper,
4025							    struct sctp_paramhdr *);
4026							phdr->param_type =
4027							    htons(SCTP_CAUSE_OUT_OF_RESC);
4028							phdr->param_length =
4029							    htons(sizeof(struct sctp_paramhdr));
4030						}
4031						sctp_abort_association(inp, stcb, m,
4032						    iphlen, sh, oper, vrf_id);
4033					}
4034					*offset = length;
4035					return (NULL);
4036				}
4037			}
4038	process_cookie_anyway:
4039			{
4040				struct mbuf *ret_buf;
4041				struct sctp_inpcb *linp;
4042
4043				if (stcb) {
4044					linp = NULL;
4045				} else {
4046					linp = inp;
4047				}
4048
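				/*
				 * When no association exists yet, hold the
				 * endpoint's ASOC create lock across cookie
				 * processing so that concurrent COOKIE-ECHOs
				 * cannot both create an association.
				 */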
4049				if (linp) {
4050					SCTP_ASOC_CREATE_LOCK(linp);
4051				}
4052				if (netp) {
4053					ret_buf =
4054					    sctp_handle_cookie_echo(m, iphlen,
4055					    *offset, sh,
4056					    (struct sctp_cookie_echo_chunk *)ch,
4057					    &inp, &stcb, netp,
4058					    auth_skipped,
4059					    auth_offset,
4060					    auth_len,
4061					    &locked_tcb,
4062					    vrf_id);
4063				} else {
4064					ret_buf = NULL;
4065				}
4066				if (linp) {
4067					SCTP_ASOC_CREATE_UNLOCK(linp);
4068				}
4069				if (ret_buf == NULL) {
4070					if (locked_tcb) {
4071						SCTP_TCB_UNLOCK(locked_tcb);
4072					}
4073					SCTPDBG(SCTP_DEBUG_INPUT3,
4074					    "GAK, null buffer\n");
4075					auth_skipped = 0;
4076					*offset = length;
4077					return (NULL);
4078				}
4079				/* if AUTH skipped, see if it verified... */
4080				if (auth_skipped) {
4081					got_auth = 1;
4082					auth_skipped = 0;
4083				}
4084				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4085					/*
4086					 * Restart the timer if we have
4087					 * pending data
4088					 */
4089					struct sctp_tmit_chunk *chk;
4090
4091					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4092					if (chk) {
4093						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4094						    stcb->sctp_ep, stcb,
4095						    chk->whoTo);
4096					}
4097				}
4098			}
4099			break;
4100		case SCTP_COOKIE_ACK:
4101			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4102			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4103				if (locked_tcb) {
4104					SCTP_TCB_UNLOCK(locked_tcb);
4105				}
4106				return (NULL);
4107			}
4108			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4109				/* We are not interested anymore */
4110				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4111					;
4112				} else if (stcb) {
4113					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4114					*offset = length;
4115					return (NULL);
4116				}
4117			}
4118			/* He's alive so give him credit */
4119			if ((stcb) && netp && *netp) {
4120				stcb->asoc.overall_error_count = 0;
4121				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4122			}
4123			break;
4124		case SCTP_ECN_ECHO:
4125			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4126			/* He's alive so give him credit */
4127			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4128				/* It's not ours */
4129				if (locked_tcb) {
4130					SCTP_TCB_UNLOCK(locked_tcb);
4131				}
4132				*offset = length;
4133				return (NULL);
4134			}
4135			if (stcb) {
4136				stcb->asoc.overall_error_count = 0;
4137				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4138				    stcb);
4139			}
4140			break;
4141		case SCTP_ECN_CWR:
4142			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4143			/* He's alive so give him credit */
4144			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4145				/* It's not ours */
4146				if (locked_tcb) {
4147					SCTP_TCB_UNLOCK(locked_tcb);
4148				}
4149				*offset = length;
4150				return (NULL);
4151			}
4152			if (stcb) {
4153				stcb->asoc.overall_error_count = 0;
4154				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4155			}
4156			break;
4157		case SCTP_SHUTDOWN_COMPLETE:
4158			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4159			/* must be first and only chunk */
4160			if ((num_chunks > 1) ||
4161			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4162				*offset = length;
4163				if (locked_tcb) {
4164					SCTP_TCB_UNLOCK(locked_tcb);
4165				}
4166				return (NULL);
4167			}
4168			if ((stcb) && netp && *netp) {
4169				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4170				    stcb, *netp);
4171			}
4172			*offset = length;
4173			return (NULL);
4174			break;
4175		case SCTP_ASCONF:
4176			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4177			/* He's alive so give him credit */
4178			if (stcb) {
4179				stcb->asoc.overall_error_count = 0;
4180				sctp_handle_asconf(m, *offset,
4181				    (struct sctp_asconf_chunk *)ch, stcb);
4182			}
4183			break;
4184		case SCTP_ASCONF_ACK:
4185			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4186			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4187				/* It's not ours */
4188				if (locked_tcb) {
4189					SCTP_TCB_UNLOCK(locked_tcb);
4190				}
4191				*offset = length;
4192				return (NULL);
4193			}
4194			if ((stcb) && netp && *netp) {
4195				/* He's alive so give him credit */
4196				stcb->asoc.overall_error_count = 0;
4197				sctp_handle_asconf_ack(m, *offset,
4198				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
4199			}
4200			break;
4201		case SCTP_FORWARD_CUM_TSN:
4202			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4203			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4204				/* It's not ours */
4205				if (locked_tcb) {
4206					SCTP_TCB_UNLOCK(locked_tcb);
4207				}
4208				*offset = length;
4209				return (NULL);
4210			}
4211			/* He's alive so give him credit */
4212			if (stcb) {
4213				int abort_flag = 0;
4214
4215				stcb->asoc.overall_error_count = 0;
4216				*fwd_tsn_seen = 1;
4217				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4218					/* We are not interested anymore */
4219					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
4220					*offset = length;
4221					return (NULL);
4222				}
4223				sctp_handle_forward_tsn(stcb,
4224				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag);
4225				if (abort_flag) {
4226					*offset = length;
4227					return (NULL);
4228				} else {
4229					stcb->asoc.overall_error_count = 0;
4230				}
4231
4232			}
4233			break;
4234		case SCTP_STREAM_RESET:
4235			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4236			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4237			    chk_length, chunk_buf);
4238			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4239				/* It's not ours */
4240				if (locked_tcb) {
4241					SCTP_TCB_UNLOCK(locked_tcb);
4242				}
4243				*offset = length;
4244				return (NULL);
4245			}
4246			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4247				/* We are not interested anymore */
4248				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4249				*offset = length;
4250				return (NULL);
4251			}
4252			if (stcb->asoc.peer_supports_strreset == 0) {
4253				/*
4254				 * hmm, peer should have announced this, but
4255				 * we will turn it on since he is sending us
4256				 * a stream reset.
4257				 */
4258				stcb->asoc.peer_supports_strreset = 1;
4259			}
4260			if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) {
4261				/* stop processing */
4262				*offset = length;
4263				return (NULL);
4264			}
4265			break;
4266		case SCTP_PACKET_DROPPED:
4267			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4268			/* re-get it all please */
4269			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4270				/* It's not ours */
4271				if (locked_tcb) {
4272					SCTP_TCB_UNLOCK(locked_tcb);
4273				}
4274				*offset = length;
4275				return (NULL);
4276			}
4277			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4278			    chk_length, chunk_buf);
4279
4280			if (ch && (stcb) && netp && (*netp)) {
4281				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4282				    stcb, *netp);
4283			}
4284			break;
4285
4286		case SCTP_AUTHENTICATION:
4287			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4288			if (sctp_auth_disable)
4289				goto unknown_chunk;
4290
4291			if (stcb == NULL) {
4292				/* save the first AUTH for later processing */
4293				if (auth_skipped == 0) {
4294					auth_offset = *offset;
4295					auth_len = chk_length;
4296					auth_skipped = 1;
4297				}
4298				/* skip this chunk (temporarily) */
4299				goto next_chunk;
4300			}
4301			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4302			    (chk_length > (sizeof(struct sctp_auth_chunk) +
4303			    SCTP_AUTH_DIGEST_LEN_MAX))) {
4304				/* It's not ours */
4305				if (locked_tcb) {
4306					SCTP_TCB_UNLOCK(locked_tcb);
4307				}
4308				*offset = length;
4309				return (NULL);
4310			}
4311			if (got_auth == 1) {
4312				/* skip this chunk... it's already auth'd */
4313				goto next_chunk;
4314			}
4315			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4316			    chk_length, chunk_buf);
4317			got_auth = 1;
4318			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4319			    m, *offset)) {
4320				/* auth HMAC failed so dump the packet */
4321				*offset = length;
4322				return (stcb);
4323			} else {
4324				/* remaining chunks are HMAC checked */
4325				stcb->asoc.authenticated = 1;
4326			}
4327			break;
4328
4329		default:
4330	unknown_chunk:
4331			/* it's an unknown chunk! */
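			/*
			 * The two high-order bits of the chunk type say what
			 * to do with an unrecognized chunk: 0x40 set means
			 * report it in an ERROR chunk, 0x80 set means skip it
			 * and keep processing, and 0x80 clear means discard
			 * the rest of the packet.
			 */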
4332			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4333				struct mbuf *mm;
4334				struct sctp_paramhdr *phd;
4335
4336				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4337				    0, M_DONTWAIT, 1, MT_DATA);
4338				if (mm) {
4339					phd = mtod(mm, struct sctp_paramhdr *);
4340					/*
4341					 * We cheat and use param type since
4342					 * we did not bother to define an
4343					 * error cause struct. They are the
4344					 * same basic format with different
4345					 * names.
4346					 */
4347					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4348					phd->param_length = htons(chk_length + sizeof(*phd));
4349					SCTP_BUF_LEN(mm) = sizeof(*phd);
4350					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4351					    M_DONTWAIT);
4352					if (SCTP_BUF_NEXT(mm)) {
4353						sctp_queue_op_err(stcb, mm);
4354					} else {
4355						sctp_m_freem(mm);
4356					}
4357				}
4358			}
4359			if ((ch->chunk_type & 0x80) == 0) {
4360				/* discard this packet */
4361				*offset = length;
4362				return (stcb);
4363			}	/* else skip this bad chunk and continue... */
4364			break;
4365		}		/* switch (ch->chunk_type) */
4366
4367
4368next_chunk:
4369		/* get the next chunk */
4370		*offset += SCTP_SIZE32(chk_length);
4371		if (*offset >= length) {
4372			/* no more data left in the mbuf chain */
4373			break;
4374		}
4375		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4376		    sizeof(struct sctp_chunkhdr), chunk_buf);
4377		if (ch == NULL) {
4378			if (locked_tcb) {
4379				SCTP_TCB_UNLOCK(locked_tcb);
4380			}
4381			*offset = length;
4382			return (NULL);
4383		}
4384	}			/* while */
4385	return (stcb);
4386}
4387
4388
4389/*
4390 * Process the ECN bits. Something is set, so we must look to see whether it
4391 * is ECN(0), ECN(1), or CE.
4392 */
4393static __inline void
4394sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4395    uint8_t ecn_bits)
4396{
4397	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4398		;
4399	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4400		/*
4401		 * We only add to the nonce sum for ECT1; ECT0 does not
4402		 * change the NS bit (which we have yet to find a way to
4403		 * send).
4404		 */
4405
4406		/* ECN Nonce stuff */
4407		stcb->asoc.receiver_nonce_sum++;
4408		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
4409
4410		/*
4411		 * Drag up the last_echo point if cumack is larger, since we
4412		 * don't want the point falling way behind by more than
4413		 * 2^31 and then having it be incorrect.
4414		 */
4415		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4416		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4417			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4418		}
4419	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4420		/*
4421		 * Drag up the last_echo point if cumack is larger, since we
4422		 * don't want the point falling way behind by more than
4423		 * 2^31 and then having it be incorrect.
4424		 */
4425		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4426		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4427			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4428		}
4429	}
4430}
4431
4432static __inline void
4433sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4434    uint32_t high_tsn, uint8_t ecn_bits)
4435{
4436	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4437		/*
4438		 * We possibly must notify the sender that a congestion
4439		 * window reduction is in order. We do this by adding an ECNE
4440		 * chunk to the output chunk queue. The incoming CWR will
4441		 * remove this chunk.
4442		 */
4443		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4444		    MAX_TSN)) {
4445			/* Yep, we need to add an ECNE */
4446			sctp_send_ecn_echo(stcb, net, high_tsn);
4447			stcb->asoc.last_echo_tsn = high_tsn;
4448		}
4449	}
4450}
4451
4452/*
4453 * common input chunk processing (v4 and v6)
4454 */
4455void
4456sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
4457    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
4458    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
4459    uint8_t ecn_bits, uint32_t vrf_id)
4460{
4461	/*
4462	 * Control chunk processing
4463	 */
4464	uint32_t high_tsn;
4465	int fwd_tsn_seen = 0, data_processed = 0;
4466	struct mbuf *m = *mm;
4467	int abort_flag = 0;
4468	int un_sent;
4469
4470	SCTP_STAT_INCR(sctps_recvdatagrams);
4471#ifdef SCTP_AUDITING_ENABLED
4472	sctp_audit_log(0xE0, 1);
4473	sctp_auditing(0, inp, stcb, net);
4474#endif
4475
4476	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
4477	    m, iphlen, offset);
4478
4479	if (stcb) {
4480		/* always clear this before beginning a packet */
4481		stcb->asoc.authenticated = 0;
4482		stcb->asoc.seen_a_sack_this_pkt = 0;
4483	}
4484	if (IS_SCTP_CONTROL(ch)) {
4485		/* process the control portion of the SCTP packet */
4486		/* sa_ignore NO_NULL_CHK */
4487		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
4488		    inp, stcb, &net, &fwd_tsn_seen, vrf_id);
4489		if (stcb) {
4490			/*
4491			 * This covers us if the cookie-echo was there and
4492			 * it changes our INP.
4493			 */
4494			inp = stcb->sctp_ep;
4495		}
4496	} else {
4497		/*
4498		 * no control chunks, so pre-process DATA chunks (these
4499		 * checks are taken care of by control processing)
4500		 */
4501
4502		/*
4503		 * if DATA only packet, and auth is required, then punt...
4504		 * can't have authenticated without any AUTH (control)
4505		 * chunks
4506		 */
4507		if ((stcb != NULL) && !sctp_auth_disable &&
4508		    sctp_auth_is_required_chunk(SCTP_DATA,
4509		    stcb->asoc.local_auth_chunks)) {
4510			/* "silently" ignore */
4511			SCTP_STAT_INCR(sctps_recvauthmissing);
4512			SCTP_TCB_UNLOCK(stcb);
4513			return;
4514		}
4515		if (stcb == NULL) {
4516			/* out of the blue DATA chunk */
4517			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4518			    vrf_id);
4519			return;
4520		}
4521		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
4522			/* v_tag mismatch! */
4523			SCTP_STAT_INCR(sctps_badvtag);
4524			SCTP_TCB_UNLOCK(stcb);
4525			return;
4526		}
4527	}
4528
4529	if (stcb == NULL) {
4530		/*
4531		 * no valid TCB for this packet, or we found it's a bad
4532		 * packet while processing control, or we're done with this
4533		 * packet (done or skip rest of data), so we drop it...
4534		 */
4535		return;
4536	}
4537	/*
4538	 * DATA chunk processing
4539	 */
4540	/* plow through the data chunks while length > offset */
4541
4542	/*
4543	 * Rest should be DATA only.  Check authentication state if AUTH for
4544	 * DATA is required.
4545	 */
4546	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
4547	    sctp_auth_is_required_chunk(SCTP_DATA,
4548	    stcb->asoc.local_auth_chunks) &&
4549	    !stcb->asoc.authenticated) {
4550		/* "silently" ignore */
4551		SCTP_STAT_INCR(sctps_recvauthmissing);
4552		SCTPDBG(SCTP_DEBUG_AUTH1,
4553		    "Data chunk requires AUTH, skipped\n");
4554		goto trigger_send;
4555	}
4556	if (length > offset) {
4557		int retval;
4558
4559		/*
4560		 * First check to make sure our state is correct. We would
4561		 * not get here unless we really did have a tag, so we don't
4562		 * abort if this happens, just dump the chunk silently.
4563		 */
4564		switch (SCTP_GET_STATE(&stcb->asoc)) {
4565		case SCTP_STATE_COOKIE_ECHOED:
4566			/*
4567			 * We consider that data with a valid tag in this
4568			 * state shows us the cookie-ack was lost. Imply it
4569			 * was there.
4570			 */
4571			stcb->asoc.overall_error_count = 0;
4572			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
4573			break;
4574		case SCTP_STATE_COOKIE_WAIT:
4575			/*
4576			 * We consider OOTB any data sent during asoc setup.
4577			 */
4578			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4579			    vrf_id);
4580			SCTP_TCB_UNLOCK(stcb);
4581			return;
4582			break;
4583		case SCTP_STATE_EMPTY:	/* should not happen */
4584		case SCTP_STATE_INUSE:	/* should not happen */
4585		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
4586		case SCTP_STATE_SHUTDOWN_ACK_SENT:
4587		default:
4588			SCTP_TCB_UNLOCK(stcb);
4589			return;
4590			break;
4591		case SCTP_STATE_OPEN:
4592		case SCTP_STATE_SHUTDOWN_SENT:
4593			break;
4594		}
4595		/* take care of ECN, part 1. */
4596		if (stcb->asoc.ecn_allowed &&
4597		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4598			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
4599		}
4600		/* plow through the data chunks while length > offset */
4601		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
4602		    inp, stcb, net, &high_tsn);
4603		if (retval == 2) {
4604			/*
4605			 * The association aborted, NO UNLOCK needed since
4606			 * the association is destroyed.
4607			 */
4608			return;
4609		}
4610		data_processed = 1;
4611		if (retval == 0) {
4612			/* take care of ecn part 2. */
4613			if (stcb->asoc.ecn_allowed &&
4614			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4615				sctp_process_ecn_marked_b(stcb, net, high_tsn,
4616				    ecn_bits);
4617			}
4618		}
4619		/*
4620		 * Anything important needs to have been m_copy'ed in
4621		 * process_data
4622		 */
4623	}
4624	if ((data_processed == 0) && (fwd_tsn_seen)) {
4625		int was_a_gap = 0;
4626
4627		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
4628		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
4629			/* there was a gap before this data was processed */
4630			was_a_gap = 1;
4631		}
4632		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
4633		if (abort_flag) {
4634			/* Again, we aborted so NO UNLOCK needed */
4635			return;
4636		}
4637	}
4638	/* trigger send of any chunks in queue... */
4639trigger_send:
4640#ifdef SCTP_AUDITING_ENABLED
4641	sctp_audit_log(0xE0, 2);
4642	sctp_auditing(1, inp, stcb, net);
4643#endif
4644	SCTPDBG(SCTP_DEBUG_INPUT1,
4645	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
4646	    stcb->asoc.peers_rwnd,
4647	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
4648	    stcb->asoc.total_flight);
4649	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
4650
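	/*
	 * Kick the output path if control chunks are queued, or if there is
	 * unsent data and either the peer still advertises window space or
	 * nothing is in flight (allowing a zero-window probe).
	 */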
4651	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
4652	    ((un_sent) &&
4653	    (stcb->asoc.peers_rwnd > 0 ||
4654	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
4655		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
4656		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
4657		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
4658	}
4659#ifdef SCTP_AUDITING_ENABLED
4660	sctp_audit_log(0xE0, 3);
4661	sctp_auditing(2, inp, stcb, net);
4662#endif
4663	SCTP_TCB_UNLOCK(stcb);
4664	return;
4665}
4666
4667
4668
4669void
4670sctp_input(struct mbuf *i_pak, int off)
4671{
4675#ifdef SCTP_MBUF_LOGGING
4676	struct mbuf *mat;
4677
4678#endif
4679	struct mbuf *m;
4680	int iphlen;
4681	uint32_t vrf_id = 0;
4682	uint8_t ecn_bits;
4683	struct ip *ip;
4684	struct sctphdr *sh;
4685	struct sctp_inpcb *inp = NULL;
4686
4687	uint32_t check, calc_check;
4688	struct sctp_nets *net;
4689	struct sctp_tcb *stcb = NULL;
4690	struct sctp_chunkhdr *ch;
4691	int refcount_up = 0;
4692	int length, mlen, offset;
4693
4694
4695	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
4696		SCTP_RELEASE_PKT(i_pak);
4697		return;
4698	}
4699	mlen = SCTP_HEADER_LEN(i_pak);
4700	iphlen = off;
4701	m = SCTP_HEADER_TO_CHAIN(i_pak);
4702
4703	net = NULL;
4704	SCTP_STAT_INCR(sctps_recvpackets);
4705	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
4706
4707
4708#ifdef SCTP_MBUF_LOGGING
4709	/* Log in any input mbufs */
4710	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
4711		mat = m;
4712		while (mat) {
4713			if (SCTP_BUF_IS_EXTENDED(mat)) {
4714				sctp_log_mb(mat, SCTP_MBUF_INPUT);
4715			}
4716			mat = SCTP_BUF_NEXT(mat);
4717		}
4718	}
4719#endif
4720#ifdef  SCTP_PACKET_LOGGING
4721	sctp_packet_log(m, mlen);
4722#endif
4723	/*
4724	 * Must take out the iphlen, since mlen expects this (only affects the
4725	 * loopback case).
4726	 */
4727	mlen -= iphlen;
4728
4729	/*
4730	 * Get IP, SCTP, and first chunk header together in first mbuf.
4731	 */
4732	ip = mtod(m, struct ip *);
4733	offset = iphlen + sizeof(*sh) + sizeof(*ch);
4734	if (SCTP_BUF_LEN(m) < offset) {
4735		if ((m = m_pullup(m, offset)) == 0) {
4736			SCTP_STAT_INCR(sctps_hdrops);
4737			return;
4738		}
4739		ip = mtod(m, struct ip *);
4740	}
4741	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
4742	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
4743
4744	/* SCTP does not allow broadcasts or multicasts */
4745	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
4746		goto bad;
4747	}
4748	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
4749		/*
4750		 * We only look at broadcast if it's a front state. All
4751		 * others we will not have a tcb for anyway.
4752		 */
4753		goto bad;
4754	}
4755	/* validate SCTP checksum */
4756	if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
4757		/*
4758		 * we do NOT validate things from the loopback if the sysctl
4759		 * is set to 1.
4760		 */
4761		check = sh->checksum;	/* save incoming checksum */
4762		if ((check == 0) && (sctp_no_csum_on_loopback)) {
4763			/*
4764			 * special hook for where we got a local address
4765			 * somehow routed across a non IFT_LOOP type
4766			 * interface
4767			 */
4768			if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
4769				goto sctp_skip_csum_4;
4770		}
4771		sh->checksum = 0;	/* prepare for calc */
4772		calc_check = sctp_calculate_sum(m, &mlen, iphlen);
4773		if (calc_check != check) {
4774			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
4775			    calc_check, check, m, mlen, iphlen);
4776
4777			stcb = sctp_findassociation_addr(m, iphlen,
4778			    offset - sizeof(*ch),
4779			    sh, ch, &inp, &net,
4780			    vrf_id);
4781			if ((inp) && (stcb)) {
4782				sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
4783				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
4784			} else if ((inp != NULL) && (stcb == NULL)) {
4785				refcount_up = 1;
4786			}
4787			SCTP_STAT_INCR(sctps_badsum);
4788			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
4789			goto bad;
4790		}
4791		sh->checksum = calc_check;
4792	}
4793sctp_skip_csum_4:
4794	/* destination port of 0 is illegal, based on RFC2960. */
4795	if (sh->dest_port == 0) {
4796		SCTP_STAT_INCR(sctps_hdrops);
4797		goto bad;
4798	}
4799	/* validate mbuf chain length with IP payload length */
4800	if (mlen < (ip->ip_len - iphlen)) {
4801		SCTP_STAT_INCR(sctps_hdrops);
4802		goto bad;
4803	}
4804	/*
4805	 * Locate the pcb and tcb for the datagram; sctp_findassociation_addr()
4806	 * wants the IP/SCTP/first chunk header...
4807	 */
4808	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
4809	    sh, ch, &inp, &net, vrf_id);
4810	/* inp's ref-count increased && stcb locked */
4811	if (inp == NULL) {
4812		struct sctp_init_chunk *init_chk, chunk_buf;
4813
4814		SCTP_STAT_INCR(sctps_noport);
4815#ifdef ICMP_BANDLIM
4816		/*
4817		 * we use the bandwidth limiting to protect against sending
4818		 * too many ABORTS all at once. In this case these count the
4819		 * same as an ICMP message.
4820		 */
4821		if (badport_bandlim(0) < 0)
4822			goto bad;
4823#endif				/* ICMP_BANDLIM */
4824		SCTPDBG(SCTP_DEBUG_INPUT1,
4825		    "Sending an ABORT from packet entry!\n");
4826		if (ch->chunk_type == SCTP_INITIATION) {
4827			/*
4828			 * we do a trick here to get the INIT tag, dig in
4829			 * and get the tag from the INIT and put it in the
4830			 * common header.
4831			 */
4832			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4833			    iphlen + sizeof(*sh), sizeof(*init_chk),
4834			    (uint8_t *) & chunk_buf);
4835			if (init_chk != NULL)
4836				sh->v_tag = init_chk->init.initiate_tag;
4837		}
4838		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4839			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
4840			goto bad;
4841		}
4842		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
4843			goto bad;
4844		}
4845		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
4846			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
4847		goto bad;
4848	} else if (stcb == NULL) {
4849		refcount_up = 1;
4850	}
4851#ifdef IPSEC
4852	/*
4853	 * I very much doubt any of the IPSEC stuff will work but I have no
4854	 * idea, so I will leave it in place.
4855	 */
4856
4857	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
4858		ipsecstat.in_polvio++;
4859		SCTP_STAT_INCR(sctps_hdrops);
4860		goto bad;
4861	}
4862#endif				/* IPSEC */
4863
4864	/*
4865	 * common chunk processing
4866	 */
4867	length = ip->ip_len + iphlen;
4868	offset -= sizeof(struct sctp_chunkhdr);
4869
4870	ecn_bits = ip->ip_tos;
4871
4872	/* sa_ignore NO_NULL_CHK */
4873	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
4874	    inp, stcb, net, ecn_bits, vrf_id);
4875	/* inp's ref-count reduced && stcb unlocked */
4876	if (m) {
4877		sctp_m_freem(m);
4878	}
4879	if ((inp) && (refcount_up)) {
4880		/* reduce ref-count */
4881		SCTP_INP_WLOCK(inp);
4882		SCTP_INP_DECR_REF(inp);
4883		SCTP_INP_WUNLOCK(inp);
4884	}
4885	return;
4886bad:
4887	if (stcb) {
4888		SCTP_TCB_UNLOCK(stcb);
4889	}
4890	if ((inp) && (refcount_up)) {
4891		/* reduce ref-count */
4892		SCTP_INP_WLOCK(inp);
4893		SCTP_INP_DECR_REF(inp);
4894		SCTP_INP_WUNLOCK(inp);
4895	}
4896	if (m) {
4897		sctp_m_freem(m);
4898	}
4899	return;
4900}
4901