/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 170181 2007-06-01 11:19:54Z rrs $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>



static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now stops not only all cookie timers but also any INIT
	 * timers, which makes sure that the timers are stopped in all
	 * collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}

/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	op_err = NULL;
	init = &cp->init;
	/* First, are we accepting? */
	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init: Abort, so_qlimit:%d\n",
		    inp->sctp_socket->so_qlimit);
		/*
		 * FIX ME ?? What about the TCP model when we have a
		 * match/restart case?
		 */
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	/* validate parameters */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		return;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
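	/*
	 * The mandatory fields look sane; init_limit marks the end of the
	 * INIT chunk so the AUTH-related parameters can be scanned from
	 * just past the fixed chunk header up to that limit.
	 */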
	init_limit = offset + ntohs(cp->ch.chunk_length);
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    init_limit)) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		return;
	}
	/* send an INIT-ACK w/cookie */
	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id);
}

/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	if (TAILQ_FIRST(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;

#if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING)
			sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
#endif

		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp;

		/* cut back on number of streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* This if is probably not needed but I am cautious */
		if (asoc->strmout) {
			/* First make sure no data chunks are trapped */
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				sp = TAILQ_FIRST(&outs->outqueue);
				while (sp) {
					TAILQ_REMOVE(&outs->outqueue, sp,
					    next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
					/* Free the chunk */
					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
					    sp, stcb);

					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
					sp = TAILQ_FIRST(&outs->outqueue);
				}
			}
		}
		/* cut back the count and abandon the upper streams */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->cumulative_tsn = asoc->asconf_seq_in;
	asoc->last_echo_tsn = asoc->asconf_seq_in;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			while (ctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
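	/*
	 * Allocate our inbound stream array; its size is the peer's
	 * advertised number of outbound streams, clamped above at
	 * MAX_SCTP_STREAMS.
	 */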
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are unset (if pr-sctp is not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIEs, existing and new, call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
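	/*
	 * A valid INIT-ACK means the peer and this path responded, so clear
	 * the association and destination error counters before echoing the
	 * cookie.
	 */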
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer. We do this first before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send an op error. But in
		 * any case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, it's broken.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}

static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_nets *r_net;
	struct timeval tv;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;

	memset(&store, 0, sizeof(store));
	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
		sin->sin_family = cp->heartbeat.hb_info.addr_family;
		sin->sin_len = cp->heartbeat.hb_info.addr_len;
		sin->sin_port = stcb->rport;
		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin->sin_addr));
	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
		sin6->sin6_port = stcb->rport;
		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin6->sin6_addr));
	} else {
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
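	/*
	 * Only confirm an unconfirmed address when the echoed random values
	 * match the ones we placed in the HEARTBEAT; this keeps a forged
	 * HEARTBEAT-ACK from confirming an address for us.
	 */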
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If it's an HB and its random value is correct we can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			r_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (r_net != stcb->asoc.primary_destination) {
				/*
				 * first one on the list is NOT the primary.
				 * sctp_cmpaddr() is much more efficient if
				 * the primary is the first on the list, so
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
			}
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net);
	}
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
		/* now was it the primary? if so restore */
		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
		}
	}
	/* Now let's do an RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
}

491
492static void
493sctp_handle_abort(struct sctp_abort_chunk *cp,
494    struct sctp_tcb *stcb, struct sctp_nets *net)
495{
496	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
497	if (stcb == NULL)
498		return;
499
500	/* stop any receive timers */
501	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
502	/* notify user of the abort and clean up... */
503	sctp_abort_notification(stcb, 0);
504	/* free the tcb */
505	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
506	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
507	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
508		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
509	}
510#ifdef SCTP_ASOCLOG_OF_TSNS
511	sctp_print_out_track_log(stcb);
512#endif
513	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
514	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
515}
516
517static void
518sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
519    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
520{
521	struct sctp_association *asoc;
522	int some_on_streamwheel;
523
524	SCTPDBG(SCTP_DEBUG_INPUT2,
525	    "sctp_handle_shutdown: handling SHUTDOWN\n");
526	if (stcb == NULL)
527		return;
528	asoc = &stcb->asoc;
529	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
530	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
531		return;
532	}
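	/*
	 * A correctly sized SHUTDOWN carries a cumulative TSN ack; process
	 * it much like a SACK before deciding whether we can reply.
	 */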
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		sctp_update_acked(stcb, cp, net, abort_flag);
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of the last
		 * record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	}
	/* Now are we there yet? */
	some_on_streamwheel = 0;
	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
		/* Check to see if some data is queued */
		struct sctp_stream_out *outs;

		TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
			if (!TAILQ_EMPTY(&outs->outqueue)) {
				some_on_streamwheel = 1;
				break;
			}
		}
	}
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;

		/* start SHUTDOWN timer */
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}

607
608static void
609sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
610    struct sctp_tcb *stcb, struct sctp_nets *net)
611{
612	struct sctp_association *asoc;
613
614	SCTPDBG(SCTP_DEBUG_INPUT2,
615	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
616	if (stcb == NULL)
617		return;
618
619	asoc = &stcb->asoc;
620	/* process according to association state */
621	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
622	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
623		/* unexpected SHUTDOWN-ACK... so ignore... */
624		SCTP_TCB_UNLOCK(stcb);
625		return;
626	}
627	if (asoc->control_pdapi) {
628		/*
629		 * With a normal shutdown we assume the end of last record.
630		 */
631		SCTP_INP_READ_LOCK(stcb->sctp_ep);
632		asoc->control_pdapi->end_added = 1;
633		asoc->control_pdapi->pdapi_aborted = 1;
634		asoc->control_pdapi = NULL;
635		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
636		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
637	}
638	/* are the queues empty? */
639	if (!TAILQ_EMPTY(&asoc->send_queue) ||
640	    !TAILQ_EMPTY(&asoc->sent_queue) ||
641	    !TAILQ_EMPTY(&asoc->out_wheel)) {
642		sctp_report_all_outbound(stcb, 0);
643	}
644	/* stop the timer */
645	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
646	/* send SHUTDOWN-COMPLETE */
647	sctp_send_shutdown_complete(stcb, net);
648	/* notify upper layer protocol */
649	if (stcb->sctp_socket) {
650		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
651		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
652		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
653			/* Set the connected flag to disconnected */
654			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
655		}
656	}
657	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
658	/* free the TCB but first save off the ep */
659	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
660	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
661}
662
663/*
664 * Skip past the param header and then we will find the chunk that caused the
665 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
666 * our peer must be broken.
667 */
668static void
669sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
670    struct sctp_nets *net)
671{
672	struct sctp_chunkhdr *chk;
673
674	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
675	switch (chk->chunk_type) {
676	case SCTP_ASCONF_ACK:
677	case SCTP_ASCONF:
678		sctp_asconf_cleanup(stcb, net);
679		break;
680	case SCTP_FORWARD_CUM_TSN:
681		stcb->asoc.peer_supports_prsctp = 0;
682		break;
683	default:
684		SCTPDBG(SCTP_DEBUG_INPUT2,
685		    "Peer does not support chunk type %d(%x)??\n",
686		    chk->chunk_type, (uint32_t) chk->chunk_type);
687		break;
688	}
689}
690
691/*
692 * Skip past the param header and then we will find the param that caused the
693 * problem.  There are a number of param's in a ASCONF OR the prsctp param
694 * these will turn of specific features.
695 */
696static void
697sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
698{
699	struct sctp_paramhdr *pbad;
700
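	/*
	 * The unrecognized parameter itself sits right behind the error
	 * cause header, so step past the cause header to reach it.
	 */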
	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_ECN_NONCE_SUPPORTED:
		stcb->asoc.peer_supports_ecn_nonce = 0;
		stcb->asoc.ecn_nonce_allowed = 0;
		stcb->asoc.ecn_allowed = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.peer_supports_asconf = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

734
735static int
736sctp_handle_error(struct sctp_chunkhdr *ch,
737    struct sctp_tcb *stcb, struct sctp_nets *net)
738{
739	int chklen;
740	struct sctp_paramhdr *phdr;
741	uint16_t error_type;
742	uint16_t error_len;
743	struct sctp_association *asoc;
744
745	int adjust;
746
747	/* parse through all of the errors and process */
748	asoc = &stcb->asoc;
749	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
750	    sizeof(struct sctp_chunkhdr));
751	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
752	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
753		/* Process an Error Cause */
754		error_type = ntohs(phdr->param_type);
755		error_len = ntohs(phdr->param_length);
756		if ((error_len > chklen) || (error_len == 0)) {
757			/* invalid param length for this param */
758			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
759			    chklen, error_len);
760			return (0);
761		}
762		switch (error_type) {
763		case SCTP_CAUSE_INVALID_STREAM:
764		case SCTP_CAUSE_MISSING_PARAM:
765		case SCTP_CAUSE_INVALID_PARAM:
766		case SCTP_CAUSE_NO_USER_DATA:
767			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
768			    error_type);
769			break;
770		case SCTP_CAUSE_STALE_COOKIE:
771			/*
772			 * We only act if we have echoed a cookie and are
773			 * waiting.
774			 */
775			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
776				int *p;
777
778				p = (int *)((caddr_t)phdr + sizeof(*phdr));
779				/* Save the time doubled */
780				asoc->cookie_preserve_req = ntohl(*p) << 1;
781				asoc->stale_cookie_count++;
782				if (asoc->stale_cookie_count >
783				    asoc->max_init_times) {
784					sctp_abort_notification(stcb, 0);
785					/* now free the asoc */
786					sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
787					return (-1);
788				}
789				/* blast back to INIT state */
790				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
791				asoc->state |= SCTP_STATE_COOKIE_WAIT;
792
793				sctp_stop_all_cookie_timers(stcb);
794				sctp_send_initiate(stcb->sctp_ep, stcb);
795			}
796			break;
797		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
798			/*
799			 * Nothing we can do here, we don't do hostname
800			 * addresses so if the peer does not like my IPv6
801			 * (or IPv4 for that matter) it does not matter. If
802			 * they don't support that type of address, they can
803			 * NOT possibly get that packet type... i.e. with no
804			 * IPv6 you can't recieve a IPv6 packet. so we can
805			 * safely ignore this one. If we ever added support
806			 * for HOSTNAME Addresses, then we would need to do
807			 * something here.
808			 */
809			break;
810		case SCTP_CAUSE_UNRECOG_CHUNK:
811			sctp_process_unrecog_chunk(stcb, phdr, net);
812			break;
813		case SCTP_CAUSE_UNRECOG_PARAM:
814			sctp_process_unrecog_param(stcb, phdr);
815			break;
816		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
817			/*
818			 * We ignore this since the timer will drive out a
819			 * new cookie anyway and there timer will drive us
820			 * to send a SHUTDOWN_COMPLETE. We can't send one
821			 * here since we don't have their tag.
822			 */
823			break;
824		case SCTP_CAUSE_DELETING_LAST_ADDR:
825		case SCTP_CAUSE_RESOURCE_SHORTAGE:
826		case SCTP_CAUSE_DELETING_SRC_ADDR:
827			/*
828			 * We should NOT get these here, but in a
829			 * ASCONF-ACK.
830			 */
831			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
832			    error_type);
833			break;
834		case SCTP_CAUSE_OUT_OF_RESC:
835			/*
836			 * And what, pray tell do we do with the fact that
837			 * the peer is out of resources? Not really sure we
838			 * could do anything but abort. I suspect this
839			 * should have came WITH an abort instead of in a
840			 * OP-ERROR.
841			 */
842			break;
843		default:
844			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
845			    error_type);
846			break;
847		}
848		adjust = SCTP_SIZE32(error_len);
849		chklen -= adjust;
850		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
851	}
852	return (0);
853}
854
855static int
856sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
857    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
858    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
859{
860	struct sctp_init_ack *init_ack;
861	int *state;
862	struct mbuf *op_err;
863
864	SCTPDBG(SCTP_DEBUG_INPUT2,
865	    "sctp_handle_init_ack: handling INIT-ACK\n");
866
867	if (stcb == NULL) {
868		SCTPDBG(SCTP_DEBUG_INPUT2,
869		    "sctp_handle_init_ack: TCB is null\n");
870		return (-1);
871	}
872	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
873		/* Invalid length */
874		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
875		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
876		    op_err, 0);
877		*abort_no_unlock = 1;
878		return (-1);
879	}
880	init_ack = &cp->init;
881	/* validate parameters */
882	if (init_ack->initiate_tag == 0) {
883		/* protocol error... send an abort */
884		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
885		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
886		    op_err, 0);
887		*abort_no_unlock = 1;
888		return (-1);
889	}
890	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
891		/* protocol error... send an abort */
892		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
893		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
894		    op_err, 0);
895		*abort_no_unlock = 1;
896		return (-1);
897	}
898	if (init_ack->num_inbound_streams == 0) {
899		/* protocol error... send an abort */
900		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
901		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
902		    op_err, 0);
903		*abort_no_unlock = 1;
904		return (-1);
905	}
906	if (init_ack->num_outbound_streams == 0) {
907		/* protocol error... send an abort */
908		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
909		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
910		    op_err, 0);
911		*abort_no_unlock = 1;
912		return (-1);
913	}
914	/* process according to association state... */
915	state = &stcb->asoc.state;
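	/*
	 * Only COOKIE-WAIT expects an INIT-ACK; in every other state the
	 * chunk is stale or out of order and is simply discarded.
	 */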
	switch (*state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock, vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
			*state = SCTP_STATE_COOKIE_ECHOED |
			    SCTP_STATE_SHUTDOWN_PENDING;
		} else {
			*state = SCTP_STATE_COOKIE_ECHOED;
		}

		/* reset the RTO calc */
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of an exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}


/*
 * handle a state cookie for an existing association
 * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
 *    note: this is a "split" mbuf and the cookie signature does not exist
 * offset: offset into mbuf to the cookie-echo chunk
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
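	/*
	 * cookie_how[] keeps a short trace of which collision branch each
	 * COOKIE-ECHO took, which helps when debugging the Section 5.2.4
	 * Table 2 cases handled below.
	 */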
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		struct mbuf *op_err;
		struct sctp_paramhdr *ph;

		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* pre-reserve some space */
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info); the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull an INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info); the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
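	/*
	 * Compare the tags recorded in the cookie against this existing
	 * association to classify the collision (the A/B/C/D cases of
	 * Section 5.2.4 Table 2 in the spec) and react accordingly.
	 */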
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
		 * to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
#ifdef INVARIANTS
			panic("Case D and non-match seq?");
#else
			SCTP_PRINTF("Case D, seq non-match %x vs %x?\n",
			    ntohl(initack_cp->init.initial_tsn),
			    asoc->init_seq_number);
#endif
		}
		switch (SCTP_GET_STATE(asoc)) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it... but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);

			} else {
				/* if ok, move to OPEN state */
				asoc->state = SCTP_STATE_OPEN;
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got a
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
				soisconnected(stcb->sctp_ep->sctp_socket);
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send a HB make sure we don't
			 * double things
			 */
			net->hb_responded = 1;

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
		 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
	    init_cp->init.initiate_tag == 0)) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normally would
			 * have hit case C. This in effect "extends" our
			 * vtags in this collision case to be 64 bits. The
			 * same collision could occur, i.e. you get both the
			 * vtag and seq number the same twice in a row, but
			 * that is much less likely. If it did happen then
			 * we would proceed through and bring up the assoc..
			 * we may end up with the wrong stream setup
			 * however.. which would be bad.. but there is no
			 * way to tell.. until we send on a stream that does
			 * not exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit, we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
			 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
				soisconnected(stcb->sctp_ep->sctp_socket);
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else {
			asoc->state = SCTP_STATE_OPEN;
		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then when we return the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			asoc->state = SCTP_STATE_OPEN |
			    SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			asoc->state = SCTP_STATE_OPEN;
		}
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    sctppcbinfo.hashasocmark)];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* Is this the first restart? */
		if (stcb->asoc.in_restart_hash == 0) {
			/* Ok add it to assoc_id vtag hash */
			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
			    sctppcbinfo.hashrestartmark)];
			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
			stcb->asoc.in_restart_hash = 1;
		}
		/* process the INIT info (peer's info) */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}


/*
 * handle a state cookie for a new association
 * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
 *    note: this is a "split" mbuf and the cookie signature does not exist
 * offset: offset into mbuf to the cookie-echo chunk
 * length: length of the cookie chunk
 * to: where the init was from
 * returns a new TCB
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

	/*
	 * find and validate the INIT chunk in the cookie (peer's info); the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull an INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info); the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */
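	/*
	 * The verification tag handed to sctp_aloc_assoc() is the one we
	 * originally picked for this association; it travels inside the
	 * cookie's embedded INIT-ACK.
	 */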
1536	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1537	    ntohl(initack_cp->init.initiate_tag), vrf_id);
1538	if (stcb == NULL) {
1539		struct mbuf *op_err;
1540
1541		/* memory problem? */
1542		SCTPDBG(SCTP_DEBUG_INPUT1,
1543		    "process_cookie_new: no room for another TCB!\n");
1544		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1545
1546		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1547		    sh, op_err, vrf_id);
1548		return (NULL);
1549	}
1550	/* get the correct sctp_nets */
1551	if (netp)
1552		*netp = sctp_findnet(stcb, init_src);
1553
1554	asoc = &stcb->asoc;
1555	/* get scope variables out of cookie */
1556	asoc->ipv4_local_scope = cookie->ipv4_scope;
1557	asoc->site_scope = cookie->site_scope;
1558	asoc->local_scope = cookie->local_scope;
1559	asoc->loopback_scope = cookie->loopback_scope;
1560
1561	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1562	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1563		struct mbuf *op_err;
1564
1565		/*
1566		 * Houston, we have a problem. The endpoint's address-family
1567		 * settings changed while the cookie was in flight. The only
1568		 * recourse is to abort the association.
1569		 */
1570		atomic_add_int(&stcb->asoc.refcnt, 1);
1571		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1572		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1573		    sh, op_err, vrf_id);
1574		atomic_add_int(&stcb->asoc.refcnt, -1);
1575		return (NULL);
1576	}
1577	/* process the INIT-ACK info (my info) */
1578	old_tag = asoc->my_vtag;
1579	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1580	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1581	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1582	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1583	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1584	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1585	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1586	asoc->str_reset_seq_in = asoc->init_seq_number;
1587
1588	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
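	/*
	 * Everything we will send is seeded from our own initial TSN taken
	 * out of the INIT-ACK (hypothetical example: initial_tsn = 1000
	 * gives sending_seq = 1000 and last_acked_seq = last_cwr_tsn = 999),
	 * while sctp_process_init() below seeds the receive side in the
	 * same fashion from the peer's INIT.
	 */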
1589
1590	/* process the INIT info (peer's info) */
1591	if (netp)
1592		retval = sctp_process_init(init_cp, stcb, *netp);
1593	else
1594		retval = 0;
1595	if (retval < 0) {
1596		atomic_add_int(&stcb->asoc.refcnt, 1);
1597		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1598		atomic_add_int(&stcb->asoc.refcnt, -1);
1599		return (NULL);
1600	}
1601	/* load all addresses */
1602	if (sctp_load_addresses_from_init(stcb, m, iphlen,
1603	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
1604	    init_src)) {
1605		atomic_add_int(&stcb->asoc.refcnt, 1);
1606		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1607		atomic_add_int(&stcb->asoc.refcnt, -1);
1608		return (NULL);
1609	}
1610	/*
1611	 * verify any preceding AUTH chunk that was skipped
1612	 */
1613	/* pull the local authentication parameters from the cookie/init-ack */
1614	sctp_auth_get_cookie_params(stcb, m,
1615	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1616	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
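	/*
	 * Any AUTH chunk that preceded this COOKIE-ECHO could not be
	 * verified up front because no association (and thus no shared key
	 * material) existed for it yet; the cookie has now restored the
	 * auth parameters, so we go back and verify it here, tearing the
	 * freshly built TCB down again if the HMAC does not check out.
	 */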
1617	if (auth_skipped) {
1618		struct sctp_auth_chunk *auth;
1619
1620		auth = (struct sctp_auth_chunk *)
1621		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1622		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1623			/* auth HMAC failed, dump the assoc and packet */
1624			SCTPDBG(SCTP_DEBUG_AUTH1,
1625			    "COOKIE-ECHO: AUTH failed\n");
1626			sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1627			return (NULL);
1628		} else {
1629			/* remaining chunks checked... good to go */
1630			stcb->asoc.authenticated = 1;
1631		}
1632	}
1633	/* update current state */
1634	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1635	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1636		asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1637		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1638		    stcb->sctp_ep, stcb, asoc->primary_destination);
1639	} else {
1640		asoc->state = SCTP_STATE_OPEN;
1641	}
1642	sctp_stop_all_cookie_timers(stcb);
1643	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1644	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1645
1646	/*
1647	 * if we're doing ASCONFs, check to see if we have any new local
1648	 * addresses that need to get added to the peer (e.g. addresses
1649	 * changed while the cookie echo was in flight).  This needs to be
1650	 * done after we go to the OPEN state to do the correct asconf
1651	 * processing. Otherwise, make sure we have the correct addresses in
1652	 * our lists
1653	 */
1654
1655	/* warning, we re-use sin, sin6, sa_store here! */
1656	/* pull in local_address (our "from" address) */
1657	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1658		/* source addr is IPv4 */
1659		sin = (struct sockaddr_in *)initack_src;
1660		memset(sin, 0, sizeof(*sin));
1661		sin->sin_family = AF_INET;
1662		sin->sin_len = sizeof(struct sockaddr_in);
1663		sin->sin_addr.s_addr = cookie->laddress[0];
1664	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1665		/* source addr is IPv6 */
1666		sin6 = (struct sockaddr_in6 *)initack_src;
1667		memset(sin6, 0, sizeof(*sin6));
1668		sin6->sin6_family = AF_INET6;
1669		sin6->sin6_len = sizeof(struct sockaddr_in6);
1670		sin6->sin6_scope_id = cookie->scope_id;
1671		memcpy(&sin6->sin6_addr, cookie->laddress,
1672		    sizeof(sin6->sin6_addr));
1673	} else {
1674		atomic_add_int(&stcb->asoc.refcnt, 1);
1675		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
1676		atomic_add_int(&stcb->asoc.refcnt, -1);
1677		return (NULL);
1678	}
1679
1680	sctp_check_address_list(stcb, m,
1681	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1682	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
1683	    initack_src, cookie->local_scope, cookie->site_scope,
1684	    cookie->ipv4_scope, cookie->loopback_scope);
1685
1686
1687	/* set up to notify upper layer */
1688	*notification = SCTP_NOTIFY_ASSOC_UP;
1689	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1690	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1691	    (inp->sctp_socket->so_qlimit == 0)) {
1692		/*
1693		 * This is an endpoint that called connect(); how it got a
1694		 * cookie that is NEW is a bit of a mystery. It must be that
1695		 * the INIT was sent, but before it got there a complete
1696		 * INIT/INIT-ACK/COOKIE exchange arrived. Of course that case
1697		 * should have gone to the other code path, not here, but a
1698		 * bit of protection is worth having.
1699		 */
1700		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1701		soisconnected(stcb->sctp_ep->sctp_socket);
1702	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1703	    (inp->sctp_socket->so_qlimit)) {
1704		/*
1705		 * We don't want to do anything with this one, since it is
1706		 * the listening endpoint. The timer will get started for
1707		 * accepted connections in the caller.
1708		 */
1709		;
1710	}
1711	/* since we did not send a HB make sure we don't double things */
1712	if ((netp) && (*netp))
1713		(*netp)->hb_responded = 1;
1714
1715	if (stcb->asoc.sctp_autoclose_ticks &&
1716	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1717		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1718	}
1719	/* respond with a COOKIE-ACK */
1720	/* calculate the RTT */
1721	if ((netp) && (*netp))
1722		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
1723		    &cookie->time_entered);
1724	sctp_send_cookie_ack(stcb);
1725	return (stcb);
1726}
1727
1728
1729/*
1730 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
1731 * existing (non-NULL) TCB
1732 */
1733static struct mbuf *
1734sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
1735    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
1736    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
1737    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1738    struct sctp_tcb **locked_tcb, uint32_t vrf_id)
1739{
1740	struct sctp_state_cookie *cookie;
1741	struct sockaddr_in6 sin6;
1742	struct sockaddr_in sin;
1743	struct sctp_tcb *l_stcb = *stcb;
1744	struct sctp_inpcb *l_inp;
1745	struct sockaddr *to;
1746	sctp_assoc_t sac_restart_id;
1747	struct sctp_pcb *ep;
1748	struct mbuf *m_sig;
1749	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
1750	uint8_t *sig;
1751	uint8_t cookie_ok = 0;
1752	unsigned int size_of_pkt, sig_offset, cookie_offset;
1753	unsigned int cookie_len;
1754	struct timeval now;
1755	struct timeval time_expires;
1756	struct sockaddr_storage dest_store;
1757	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
1758	struct ip *iph;
1759	int notification = 0;
1760	struct sctp_nets *netl;
1761	int had_a_existing_tcb = 0;
1762
1763	SCTPDBG(SCTP_DEBUG_INPUT2,
1764	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
1765
1766	if (inp_p == NULL) {
1767		return (NULL);
1768	}
1769	/* First get the destination address setup too. */
1770	iph = mtod(m, struct ip *);
1771	if (iph->ip_v == IPVERSION) {
1772		/* its IPv4 */
1773		struct sockaddr_in *lsin;
1774
1775		lsin = (struct sockaddr_in *)(localep_sa);
1776		memset(lsin, 0, sizeof(*lsin));
1777		lsin->sin_family = AF_INET;
1778		lsin->sin_len = sizeof(*lsin);
1779		lsin->sin_port = sh->dest_port;
1780		lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
1781		size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
1782	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1783		/* its IPv6 */
1784		struct ip6_hdr *ip6;
1785		struct sockaddr_in6 *lsin6;
1786
1787		lsin6 = (struct sockaddr_in6 *)(localep_sa);
1788		memset(lsin6, 0, sizeof(*lsin6));
1789		lsin6->sin6_family = AF_INET6;
1790		lsin6->sin6_len = sizeof(struct sockaddr_in6);
1791		ip6 = mtod(m, struct ip6_hdr *);
1792		lsin6->sin6_port = sh->dest_port;
1793		lsin6->sin6_addr = ip6->ip6_dst;
1794		size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
1795	} else {
1796		return (NULL);
1797	}
1798
1799	cookie = &cp->cookie;
1800	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
1801	cookie_len = ntohs(cp->ch.chunk_length);
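	/*
	 * cookie_len is the full COOKIE-ECHO chunk length: chunk header,
	 * state cookie, the embedded INIT and INIT-ACK copies and the
	 * trailing HMAC signature.  The size sanity check and the
	 * sig_offset arithmetic below both rely on that layout.
	 */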
1802
1803	if ((cookie->peerport != sh->src_port) ||
1804	    (cookie->myport != sh->dest_port) ||
1805	    (cookie->my_vtag != sh->v_tag)) {
1806		/*
1807		 * invalid ports or bad tag.  Note that we always leave the
1808		 * v_tag in the header in network order and when we stored
1809		 * it in the my_vtag slot we also left it in network order.
1810		 * This maintains the match even though it may be in the
1811		 * opposite byte order of the machine :->
1812		 */
1813		return (NULL);
1814	}
1815	if (cookie_len > size_of_pkt ||
1816	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
1817	    sizeof(struct sctp_init_chunk) +
1818	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
1819		/* cookie too long or too small */
1820		return (NULL);
1821	}
1822	/*
1823	 * split off the signature into its own mbuf (since it should not be
1824	 * calculated in the sctp_hmac_m() call).
1825	 */
1826	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
1827	if (sig_offset > size_of_pkt) {
1828		/* packet not correct size! */
1829		/* XXX this may already be accounted for earlier... */
1830		return (NULL);
1831	}
1832	m_sig = m_split(m, sig_offset, M_DONTWAIT);
1833	if (m_sig == NULL) {
1834		/* out of memory or ?? */
1835		return (NULL);
1836	}
1837	/*
1838	 * compute the signature/digest for the cookie
1839	 */
1840	ep = &(*inp_p)->sctp_ep;
1841	l_inp = *inp_p;
1842	if (l_stcb) {
1843		SCTP_TCB_UNLOCK(l_stcb);
1844	}
1845	SCTP_INP_RLOCK(l_inp);
1846	if (l_stcb) {
1847		SCTP_TCB_LOCK(l_stcb);
1848	}
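	/*
	 * The endpoint keeps two cookie secrets so that cookies issued just
	 * before a secret rotation still validate: roughly, a cookie
	 * stamped before time_of_secret_change is checked against the
	 * previous key, newer cookies against the current one, and a cookie
	 * stamped right at the change gets one more try with the previous
	 * key in the mismatch path further down.
	 */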
1849	/* which cookie is it? */
1850	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
1851	    (ep->current_secret_number != ep->last_secret_number)) {
1852		/* it's the old cookie */
1853		(void)sctp_hmac_m(SCTP_HMAC,
1854		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1855		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
1856	} else {
1857		/* it's the current cookie */
1858		(void)sctp_hmac_m(SCTP_HMAC,
1859		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
1860		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
1861	}
1862	/* get the signature */
1863	SCTP_INP_RUNLOCK(l_inp);
1864	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
1865	if (sig == NULL) {
1866		/* couldn't find signature */
1867		sctp_m_freem(m_sig);
1868		return (NULL);
1869	}
1870	/* compare the received digest with the computed digest */
1871	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
1872		/* try the old cookie? */
1873		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
1874		    (ep->current_secret_number != ep->last_secret_number)) {
1875			/* compute digest with old */
1876			(void)sctp_hmac_m(SCTP_HMAC,
1877			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1878			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
1879			/* compare */
1880			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
1881				cookie_ok = 1;
1882		}
1883	} else {
1884		cookie_ok = 1;
1885	}
1886
1887	/*
1888	 * Now before we continue we must reconstruct our mbuf so that
1889	 * normal processing of any other chunks will work.
1890	 */
1891	{
1892		struct mbuf *m_at;
1893
1894		m_at = m;
1895		while (SCTP_BUF_NEXT(m_at) != NULL) {
1896			m_at = SCTP_BUF_NEXT(m_at);
1897		}
1898		SCTP_BUF_NEXT(m_at) = m_sig;
1899	}
1900
1901	if (cookie_ok == 0) {
1902		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
1903		SCTPDBG(SCTP_DEBUG_INPUT2,
1904		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
1905		    (uint32_t) offset, cookie_offset, sig_offset);
1906		return (NULL);
1907	}
1908	/*
1909	 * check the cookie timestamps to be sure it's not stale
1910	 */
1911	(void)SCTP_GETTIME_TIMEVAL(&now);
1912	/* Expire time is in Ticks, so we convert to seconds */
1913	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
1914	time_expires.tv_usec = cookie->time_entered.tv_usec;
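	/*
	 * Worked example (hypothetical numbers): with a cookie_life worth
	 * 60 seconds of ticks, a cookie stamped at t = 100.500000 expires
	 * at t = 160.500000; any arrival after that falls into the stale
	 * branch below and is answered with a Stale Cookie error rather
	 * than an association.
	 */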
1915	if (timevalcmp(&now, &time_expires, >)) {
1916		/* cookie is stale! */
1917		struct mbuf *op_err;
1918		struct sctp_stale_cookie_msg *scm;
1919		uint32_t tim;
1920
1921		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
1922		    0, M_DONTWAIT, 1, MT_DATA);
1923		if (op_err == NULL) {
1924			/* FOOBAR */
1925			return (NULL);
1926		}
1927		/* pre-reserve some space */
1928		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1929		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1930		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1931
1932		/* Set the len */
1933		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
1934		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
1935		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
1936		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
1937		    (sizeof(uint32_t))));
1938		/* whole seconds of staleness, converted to usec */
1939		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
1940		/* staleness is under a second: report the usec difference */
1941		if (tim == 0)
1942			tim = now.tv_usec - cookie->time_entered.tv_usec;
1943		scm->time_usec = htonl(tim);
1944		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1945		    vrf_id);
1946		return (NULL);
1947	}
1948	/*
1949	 * Now use the lookup address to see if we have an existing
1950	 * association. This will only happen if we were in the COOKIE-WAIT
1951	 * state, an INIT collided with us, and the peer sent the cookie on
1952	 * an address other than the single address our association had for
1953	 * it. In that case we will have at least one of the tie-tags set,
1954	 * AND the address field in the cookie can be used to look the
1955	 * association up.
1956	 */
1957	to = NULL;
1958	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
1959		memset(&sin6, 0, sizeof(sin6));
1960		sin6.sin6_family = AF_INET6;
1961		sin6.sin6_len = sizeof(sin6);
1962		sin6.sin6_port = sh->src_port;
1963		sin6.sin6_scope_id = cookie->scope_id;
1964		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
1965		    sizeof(sin6.sin6_addr.s6_addr));
1966		to = (struct sockaddr *)&sin6;
1967	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
1968		memset(&sin, 0, sizeof(sin));
1969		sin.sin_family = AF_INET;
1970		sin.sin_len = sizeof(sin);
1971		sin.sin_port = sh->src_port;
1972		sin.sin_addr.s_addr = cookie->address[0];
1973		to = (struct sockaddr *)&sin;
1974	} else {
1975		/* This should not happen */
1976		return (NULL);
1977	}
1978	if ((*stcb == NULL) && to) {
1979		/* Yep, lets check */
1980		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
1981		if (*stcb == NULL) {
1982			/*
1983			 * We should only have gotten back the same inp. If we
1984			 * got back a different ep we have a problem: the
1985			 * original findep returned l_inp and now we have another.
1986			 */
1987			if (l_inp != *inp_p) {
1988				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
1989			}
1990		} else {
1991			if (*locked_tcb == NULL) {
1992				/*
1993				 * In this case we found the assoc only
1994				 * after we locked the create lock. This
1995				 * means we are in a colliding case and we
1996				 * must make sure that we unlock the tcb if
1997				 * it's one of the cases where we throw away
1998				 * the incoming packets.
1999				 */
2000				*locked_tcb = *stcb;
2001
2002				/*
2003				 * We must also increment the inp ref count,
2004				 * since the ref_count flag was set when we
2005				 * did not find the TCB; now that we found it
2006				 * the refcount was reduced, so we must raise
2007				 * it back up to balance it all :-)
2008				 */
2009				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2010				if ((*stcb)->sctp_ep != l_inp) {
2011					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2012					    (*stcb)->sctp_ep, l_inp);
2013				}
2014			}
2015		}
2016	}
2017	if (to == NULL)
2018		return (NULL);
2019
2020	cookie_len -= SCTP_SIGNATURE_SIZE;
2021	if (*stcb == NULL) {
2022		/* this is the "normal" case... get a new TCB */
2023		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2024		    cookie_len, *inp_p, netp, to, &notification,
2025		    auth_skipped, auth_offset, auth_len, vrf_id);
2026	} else {
2027		/* this is abnormal... cookie-echo on existing TCB */
2028		had_a_existing_tcb = 1;
2029		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2030		    cookie, cookie_len, *inp_p, *stcb, *netp, to,
2031		    &notification, &sac_restart_id, vrf_id);
2032	}
2033
2034	if (*stcb == NULL) {
2035		/* still no TCB... must be bad cookie-echo */
2036		return (NULL);
2037	}
2038	/*
2039	 * Ok, we built an association so confirm the address we sent the
2040	 * INIT-ACK to.
2041	 */
2042	netl = sctp_findnet(*stcb, to);
2043	/*
2044	 * This code should in theory NOT run, but we guard against it anyway.
2045	 */
2046	if (netl == NULL) {
2047		/* TSNH! Huh, why do I need to add this address here? */
2048		int ret;
2049
2050		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2051		    SCTP_IN_COOKIE_PROC);
2052		netl = sctp_findnet(*stcb, to);
2053	}
2054	if (netl) {
2055		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2056			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2057			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2058			    netl);
2059			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2060			    (*stcb), 0, (void *)netl);
2061		}
2062	}
2063	if (*stcb) {
2064		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2065		    *stcb, NULL);
2066	}
2067	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2068		if (!had_a_existing_tcb ||
2069		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2070			/*
2071			 * If we have a NEW cookie or the connect never
2072			 * reached the connected state during collision we
2073			 * must do the TCP accept thing.
2074			 */
2075			struct socket *so, *oso;
2076			struct sctp_inpcb *inp;
2077
2078			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2079				/*
2080				 * For a restart we will keep the same
2081				 * socket, no need to do anything. I THINK!!
2082				 */
2083				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id);
2084				return (m);
2085			}
2086			oso = (*inp_p)->sctp_socket;
2087			/*
2088			 * We do this to keep the sockets side happy during
2089			 * the sonewconn() call ONLY.
2090			 */
2091			NET_LOCK_GIANT();
2092			SCTP_TCB_UNLOCK((*stcb));
2093			so = sonewconn(oso, 0);
2095			NET_UNLOCK_GIANT();
2096			SCTP_INP_WLOCK((*stcb)->sctp_ep);
2097			SCTP_TCB_LOCK((*stcb));
2098			SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
2099			if (so == NULL) {
2100				struct mbuf *op_err;
2101
2102				/* Too many sockets */
2103				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2104				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2105				sctp_abort_association(*inp_p, NULL, m, iphlen,
2106				    sh, op_err, vrf_id);
2107				sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2108				return (NULL);
2109			}
2110			inp = (struct sctp_inpcb *)so->so_pcb;
2111			SCTP_INP_INCR_REF(inp);
2112			/*
2113			 * We add the unbound flag here so that if we get an
2114			 * soabort() before we get the move_pcb done, we
2115			 * will properly cleanup.
2116			 */
2117			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2118			    SCTP_PCB_FLAGS_CONNECTED |
2119			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2120			    SCTP_PCB_FLAGS_UNBOUND |
2121			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2122			    SCTP_PCB_FLAGS_DONT_WAKE);
2123			inp->sctp_features = (*inp_p)->sctp_features;
2124			inp->sctp_socket = so;
2125			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2126			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2127			inp->sctp_context = (*inp_p)->sctp_context;
2128			inp->inp_starting_point_for_iterator = NULL;
2129			/*
2130			 * copy in the authentication parameters from the
2131			 * original endpoint
2132			 */
2133			if (inp->sctp_ep.local_hmacs)
2134				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2135			inp->sctp_ep.local_hmacs =
2136			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2137			if (inp->sctp_ep.local_auth_chunks)
2138				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2139			inp->sctp_ep.local_auth_chunks =
2140			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2141			(void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2142			    &inp->sctp_ep.shared_keys);
2143
2144			/*
2145			 * Now we must move it from one hash table to
2146			 * another and get the tcb in the right place.
2147			 */
2148			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2149
2150			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2151			SCTP_TCB_UNLOCK((*stcb));
2152
2153			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2154			SCTP_TCB_LOCK((*stcb));
2155			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2156
2157
2158			/*
2159			 * now we must check to see if we were aborted while
2160			 * the move was going on and the lock/unlock
2161			 * happened.
2162			 */
2163			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2164				/*
2165				 * yep it was, we leave the assoc attached
2166				 * to the socket since the sctp_inpcb_free()
2167				 * call will send an abort for us.
2168				 */
2169				SCTP_INP_DECR_REF(inp);
2170				return (NULL);
2171			}
2172			SCTP_INP_DECR_REF(inp);
2173			/* Switch over to the new guy */
2174			*inp_p = inp;
2175			sctp_ulp_notify(notification, *stcb, 0, NULL);
2176
2177			/*
2178			 * Pull it from the incomplete queue and wake the
2179			 * guy
2180			 */
2181			soisconnected(so);
2182			return (m);
2183		}
2184	}
2185	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2186		sctp_ulp_notify(notification, *stcb, 0, NULL);
2187	}
2188	return (m);
2189}
2190
2191static void
2192sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2193    struct sctp_tcb *stcb, struct sctp_nets *net)
2194{
2195	/* cp must not be used, others call this without a c-ack :-) */
2196	struct sctp_association *asoc;
2197
2198	SCTPDBG(SCTP_DEBUG_INPUT2,
2199	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2200	if (stcb == NULL)
2201		return;
2202
2203	asoc = &stcb->asoc;
2204
2205	sctp_stop_all_cookie_timers(stcb);
2206	/* process according to association state */
2207	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2208		/* state change only needed when I am in right state */
2209		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2210		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2211			asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
2212			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2213			    stcb->sctp_ep, stcb, asoc->primary_destination);
2214
2215		} else {
2216			asoc->state = SCTP_STATE_OPEN;
2217		}
2218		/* update RTO */
2219		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2220		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2221		if (asoc->overall_error_count == 0) {
2222			net->RTO = sctp_calculate_rto(stcb, asoc, net,
2223			    &asoc->time_entered);
2224		}
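		/*
		 * When no retransmissions have muddied the timing, the span
		 * from asoc->time_entered (presumably stamped when the
		 * COOKIE-ECHO went out) to this COOKIE-ACK doubles as the
		 * association's first RTT sample; time_entered is then
		 * refreshed just below for the established state.
		 */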
2225		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2226		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
2227		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2228		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2229			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2230			soisconnected(stcb->sctp_ep->sctp_socket);
2231		}
2232		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2233		    stcb, net);
2234		/*
2235		 * since we did not send a HB make sure we don't double
2236		 * things
2237		 */
2238		net->hb_responded = 1;
2239
2240		if (stcb->asoc.sctp_autoclose_ticks &&
2241		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2242			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2243			    stcb->sctp_ep, stcb, NULL);
2244		}
2245		/*
2246		 * set ASCONF timer if ASCONFs are pending and allowed (eg.
2247		 * addresses changed when init/cookie echo in flight)
2248		 */
2249		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2250		    (stcb->asoc.peer_supports_asconf) &&
2251		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2252			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2253			    stcb->sctp_ep, stcb,
2254			    stcb->asoc.primary_destination);
2255		}
2256	}
2257	/* Toss the cookie if I can */
2258	sctp_toss_old_cookies(stcb, asoc);
2259	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2260		/* Restart the timer if we have pending data */
2261		struct sctp_tmit_chunk *chk;
2262
2263		chk = TAILQ_FIRST(&asoc->sent_queue);
2264		if (chk) {
2265			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2266			    stcb, chk->whoTo);
2267		}
2268	}
2269}
2270
2271static void
2272sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2273    struct sctp_tcb *stcb)
2274{
2275	struct sctp_nets *net;
2276	struct sctp_tmit_chunk *lchk;
2277	uint32_t tsn;
2278
2279	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2280		return;
2281	}
2282	SCTP_STAT_INCR(sctps_recvecne);
2283	tsn = ntohl(cp->tsn);
2284	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2285	/* Also we make sure we disable the nonce_wait */
2286	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2287	if (lchk == NULL) {
2288		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2289	} else {
2290		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2291	}
2292	stcb->asoc.nonce_wait_for_ecne = 0;
2293	stcb->asoc.nonce_sum_check = 0;
2294
2295	/* Find where it was sent, if possible */
2296	net = NULL;
2297	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2298	while (lchk) {
2299		if (lchk->rec.data.TSN_seq == tsn) {
2300			net = lchk->whoTo;
2301			break;
2302		}
2303		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2304			break;
2305		lchk = TAILQ_NEXT(lchk, sctp_next);
2306	}
2307	if (net == NULL)
2308		/* default is we use the primary */
2309		net = stcb->asoc.primary_destination;
2310
2311	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2312#ifdef SCTP_CWND_MONITOR
2313		int old_cwnd;
2314
2315		old_cwnd = net->cwnd;
2316#endif
2317		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
2318		net->ssthresh = net->cwnd / 2;
2319		if (net->ssthresh < net->mtu) {
2320			net->ssthresh = net->mtu;
2321			/* here back off the timer as well, to slow us down */
2322			net->RTO <<= 2;
2323		}
2324		net->cwnd = net->ssthresh;
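		/*
		 * Net effect is a multiplicative decrease, e.g. an 8*MTU
		 * cwnd becomes ssthresh = cwnd = 4*MTU; when halving would
		 * drop below one MTU the window is pinned at a single MTU
		 * and the RTO is quadrupled instead, so the path keeps
		 * being probed, just more slowly.
		 */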
2325#ifdef SCTP_CWND_MONITOR
2326		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
2327#endif
2328		/*
2329		 * we reduce once every RTT. So we will only lower cwnd at
2330		 * the next sending seq i.e. the resync_tsn.
2331		 */
2332		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2333	}
2334	/*
2335	 * We always send a CWR this way: if our previous one was lost our
2336	 * peer will get an update, and if it is not yet time to reduce
2337	 * again the CWR still gets to the peer.
2338	 */
2339	sctp_send_cwr(stcb, net, tsn);
2340}
2341
2342static void
2343sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2344{
2345	/*
2346	 * Here we get a CWR from the peer. We must look in the outqueue and
2347	 * make sure that we have a covered ECNE in the control chunk part.
2348	 * If so remove it.
2349	 */
2350	struct sctp_tmit_chunk *chk;
2351	struct sctp_ecne_chunk *ecne;
2352
2353	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2354		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2355			continue;
2356		}
2357		/*
2358		 * Look for and remove if it is the right TSN. Since there
2359		 * is only ONE ECNE on the control queue at any one time we
2360		 * don't need to worry about more than one!
2361		 */
2362		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2363		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2364		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2365			/* this covers this ECNE, we can remove it */
2366			stcb->asoc.ecn_echo_cnt_onq--;
2367			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2368			    sctp_next);
2369			if (chk->data) {
2370				sctp_m_freem(chk->data);
2371				chk->data = NULL;
2372			}
2373			stcb->asoc.ctrl_queue_cnt--;
2374			sctp_free_remote_addr(chk->whoTo);
2375			sctp_free_a_chunk(stcb, chk);
2376			break;
2377		}
2378	}
2379}
2380
2381static void
2382sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2383    struct sctp_tcb *stcb, struct sctp_nets *net)
2384{
2385	struct sctp_association *asoc;
2386
2387	SCTPDBG(SCTP_DEBUG_INPUT2,
2388	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2389	if (stcb == NULL)
2390		return;
2391
2392	asoc = &stcb->asoc;
2393	/* process according to association state */
2394	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2395		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
2396		SCTP_TCB_UNLOCK(stcb);
2397		return;
2398	}
2399	/* notify upper layer protocol */
2400	if (stcb->sctp_socket) {
2401		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
2402		/* are the queues empty? they should be */
2403		if (!TAILQ_EMPTY(&asoc->send_queue) ||
2404		    !TAILQ_EMPTY(&asoc->sent_queue) ||
2405		    !TAILQ_EMPTY(&asoc->out_wheel)) {
2406			sctp_report_all_outbound(stcb, 0);
2407		}
2408	}
2409	/* stop the timer */
2410	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2411	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
2412	/* free the TCB */
2413	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2414	return;
2415}
2416
2417static int
2418process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2419    struct sctp_nets *net, uint8_t flg)
2420{
2421	switch (desc->chunk_type) {
2422	case SCTP_DATA:
2423		/* find the TSN to resend (possibly) */
2424		{
2425			uint32_t tsn;
2426			struct sctp_tmit_chunk *tp1;
2427
2428			tsn = ntohl(desc->tsn_ifany);
2429			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2430			while (tp1) {
2431				if (tp1->rec.data.TSN_seq == tsn) {
2432					/* found it */
2433					break;
2434				}
2435				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2436				    MAX_TSN)) {
2437					/* not found */
2438					tp1 = NULL;
2439					break;
2440				}
2441				tp1 = TAILQ_NEXT(tp1, sctp_next);
2442			}
2443			if (tp1 == NULL) {
2444				/*
2445				 * Do it the other way, i.e. without paying
2446				 * attention to the queue seq order.
2447				 */
2448				SCTP_STAT_INCR(sctps_pdrpdnfnd);
2449				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2450				while (tp1) {
2451					if (tp1->rec.data.TSN_seq == tsn) {
2452						/* found it */
2453						break;
2454					}
2455					tp1 = TAILQ_NEXT(tp1, sctp_next);
2456				}
2457			}
2458			if (tp1 == NULL) {
2459				SCTP_STAT_INCR(sctps_pdrptsnnf);
2460			}
2461			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
2462				uint8_t *ddp;
2463
2464				if ((stcb->asoc.peers_rwnd == 0) &&
2465				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2466					SCTP_STAT_INCR(sctps_pdrpdiwnp);
2467					return (0);
2468				}
2469				if (stcb->asoc.peers_rwnd == 0 &&
2470				    (flg & SCTP_FROM_MIDDLE_BOX)) {
2471					SCTP_STAT_INCR(sctps_pdrpdizrw);
2472					return (0);
2473				}
2474				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2475				    sizeof(struct sctp_data_chunk));
2476				{
2477					unsigned int iii;
2478
2479					for (iii = 0; iii < sizeof(desc->data_bytes);
2480					    iii++) {
2481						if (ddp[iii] != desc->data_bytes[iii]) {
2482							SCTP_STAT_INCR(sctps_pdrpbadd);
2483							return (-1);
2484						}
2485					}
2486				}
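				/*
				 * The bytes compared above are the start of
				 * the DATA payload that the reporter echoed
				 * back; a mismatch means the drop report
				 * does not describe our chunk, so we bailed
				 * out rather than marking the TSN for
				 * retransmission.
				 */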
2487				/*
2488				 * We zero out the nonce so a resync is
2489				 * not needed
2490				 */
2491				tp1->rec.data.ect_nonce = 0;
2492
2493				if (tp1->do_rtt) {
2494					/*
2495					 * this guy had a RTO calculation
2496					 * pending on it, cancel it
2497					 */
2498					tp1->do_rtt = 0;
2499				}
2500				SCTP_STAT_INCR(sctps_pdrpmark);
2501				if (tp1->sent != SCTP_DATAGRAM_RESEND)
2502					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2503				tp1->sent = SCTP_DATAGRAM_RESEND;
2504				/*
2505				 * mark it as if we were doing a FR, since
2506				 * we will be getting gap ack reports behind
2507				 * the info from the router.
2508				 */
2509				tp1->rec.data.doing_fast_retransmit = 1;
2510				/*
2511				 * mark the tsn with what sequences can
2512				 * cause a new FR.
2513				 */
2514				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2515					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2516				} else {
2517					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2518				}
2519
2520				/* restart the timer */
2521				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2522				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2523				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2524				    stcb, tp1->whoTo);
2525
2526				/* fix counts and things */
2527#ifdef SCTP_FLIGHT_LOGGING
2528				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2529				    tp1->whoTo->flight_size,
2530				    tp1->book_size,
2531				    (uintptr_t) stcb,
2532				    tp1->rec.data.TSN_seq);
2533#endif
2534				sctp_flight_size_decrease(tp1);
2535				sctp_total_flight_decrease(stcb, tp1);
2536			} {
2537				/* audit code */
2538				unsigned int audit;
2539
2540				audit = 0;
2541				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2542					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2543						audit++;
2544				}
2545				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2546				    sctp_next) {
2547					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2548						audit++;
2549				}
2550				if (audit != stcb->asoc.sent_queue_retran_cnt) {
2551					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
2552					    audit, stcb->asoc.sent_queue_retran_cnt);
2553#ifndef SCTP_AUDITING_ENABLED
2554					stcb->asoc.sent_queue_retran_cnt = audit;
2555#endif
2556				}
2557			}
2558		}
2559		break;
2560	case SCTP_ASCONF:
2561		{
2562			struct sctp_tmit_chunk *asconf;
2563
2564			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
2565			    sctp_next) {
2566				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
2567					break;
2568				}
2569			}
2570			if (asconf) {
2571				if (asconf->sent != SCTP_DATAGRAM_RESEND)
2572					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2573				asconf->sent = SCTP_DATAGRAM_RESEND;
2574				asconf->snd_count--;
2575			}
2576		}
2577		break;
2578	case SCTP_INITIATION:
2579		/* resend the INIT */
2580		stcb->asoc.dropped_special_cnt++;
2581		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
2582			/*
2583			 * If we can get it in within a few attempts we do
2584			 * this; otherwise we let the timer fire.
2585			 */
2586			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
2587			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2588			sctp_send_initiate(stcb->sctp_ep, stcb);
2589		}
2590		break;
2591	case SCTP_SELECTIVE_ACK:
2592		/* resend the sack */
2593		sctp_send_sack(stcb);
2594		break;
2595	case SCTP_HEARTBEAT_REQUEST:
2596		/* resend a demand HB */
2597		(void)sctp_send_hb(stcb, 1, net);
2598		break;
2599	case SCTP_SHUTDOWN:
2600		sctp_send_shutdown(stcb, net);
2601		break;
2602	case SCTP_SHUTDOWN_ACK:
2603		sctp_send_shutdown_ack(stcb, net);
2604		break;
2605	case SCTP_COOKIE_ECHO:
2606		{
2607			struct sctp_tmit_chunk *cookie;
2608
2609			cookie = NULL;
2610			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
2611			    sctp_next) {
2612				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
2613					break;
2614				}
2615			}
2616			if (cookie) {
2617				if (cookie->sent != SCTP_DATAGRAM_RESEND)
2618					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2619				cookie->sent = SCTP_DATAGRAM_RESEND;
2620				sctp_stop_all_cookie_timers(stcb);
2621			}
2622		}
2623		break;
2624	case SCTP_COOKIE_ACK:
2625		sctp_send_cookie_ack(stcb);
2626		break;
2627	case SCTP_ASCONF_ACK:
2628		/* resend last asconf ack */
2629		sctp_send_asconf_ack(stcb, 1);
2630		break;
2631	case SCTP_FORWARD_CUM_TSN:
2632		send_forward_tsn(stcb, &stcb->asoc);
2633		break;
2634		/* can't do anything with these */
2635	case SCTP_PACKET_DROPPED:
2636	case SCTP_INITIATION_ACK:	/* this should not happen */
2637	case SCTP_HEARTBEAT_ACK:
2638	case SCTP_ABORT_ASSOCIATION:
2639	case SCTP_OPERATION_ERROR:
2640	case SCTP_SHUTDOWN_COMPLETE:
2641	case SCTP_ECN_ECHO:
2642	case SCTP_ECN_CWR:
2643	default:
2644		break;
2645	}
2646	return (0);
2647}
2648
2649void
2650sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2651{
2652	int i;
2653	uint16_t temp;
2654
2655	/*
2656	 * We set things to 0xffff since this is the last delivered sequence,
2657	 * and sequence 0 will be the first one in after the reset.
2658	 */
2659
2660	if (number_entries) {
2661		for (i = 0; i < number_entries; i++) {
2662			temp = ntohs(list[i]);
2663			if (temp >= stcb->asoc.streamincnt) {
2664				continue;
2665			}
2666			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
2667		}
2668	} else {
2669		list = NULL;
2670		for (i = 0; i < stcb->asoc.streamincnt; i++) {
2671			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
2672		}
2673	}
2674	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
2675}
2676
2677static void
2678sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2679{
2680	int i;
2681
2682	if (number_entries == 0) {
2683		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2684			stcb->asoc.strmout[i].next_sequence_sent = 0;
2685		}
2686	} else if (number_entries) {
2687		for (i = 0; i < number_entries; i++) {
2688			uint16_t temp;
2689
2690			temp = ntohs(list[i]);
2691			if (temp >= stcb->asoc.streamoutcnt) {
2692				/* no such stream */
2693				continue;
2694			}
2695			stcb->asoc.strmout[temp].next_sequence_sent = 0;
2696		}
2697	}
2698	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
2699}
2700
2701
2702struct sctp_stream_reset_out_request *
2703sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
2704{
2705	struct sctp_association *asoc;
2706	struct sctp_stream_reset_out_req *req;
2707	struct sctp_stream_reset_out_request *r;
2708	struct sctp_tmit_chunk *chk;
2709	int len, clen;
2710
2711	asoc = &stcb->asoc;
2712	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
2713		asoc->stream_reset_outstanding = 0;
2714		return (NULL);
2715	}
2716	if (stcb->asoc.str_reset == NULL) {
2717		asoc->stream_reset_outstanding = 0;
2718		return (NULL);
2719	}
2720	chk = stcb->asoc.str_reset;
2721	if (chk->data == NULL) {
2722		return (NULL);
2723	}
2724	if (bchk) {
2725		/* the caller wants a copy of the chk pointer */
2726		*bchk = chk;
2727	}
2728	clen = chk->send_size;
2729	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
2730	r = &req->sr_req;
2731	if (ntohl(r->request_seq) == seq) {
2732		/* found it */
2733		return (r);
2734	}
2735	len = SCTP_SIZE32(ntohs(r->ph.param_length));
2736	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
2737		/* move to the next one, there can only be a max of two */
2738		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
2739		if (ntohl(r->request_seq) == seq) {
2740			return (r);
2741		}
2742	}
2743	/* that seq is not here */
2744	return (NULL);
2745}
2746
2747static void
2748sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
2749{
2750	struct sctp_association *asoc;
2751	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
2752
2753	if (stcb->asoc.str_reset == NULL) {
2754		return;
2755	}
2756	asoc = &stcb->asoc;
2757
2758	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
2759	TAILQ_REMOVE(&asoc->control_send_queue,
2760	    chk,
2761	    sctp_next);
2762	if (chk->data) {
2763		sctp_m_freem(chk->data);
2764		chk->data = NULL;
2765	}
2766	asoc->ctrl_queue_cnt--;
2767	sctp_free_remote_addr(chk->whoTo);
2768
2769	sctp_free_a_chunk(stcb, chk);
2770	stcb->asoc.str_reset = NULL;
2771}
2772
2773
2774static int
2775sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
2776    uint32_t seq, uint32_t action,
2777    struct sctp_stream_reset_response *respin)
2778{
2779	uint16_t type;
2780	int lparm_len;
2781	struct sctp_association *asoc = &stcb->asoc;
2782	struct sctp_tmit_chunk *chk;
2783	struct sctp_stream_reset_out_request *srparam;
2784	int number_entries;
2785
2786	if (asoc->stream_reset_outstanding == 0) {
2787		/* duplicate */
2788		return (0);
2789	}
2790	if (seq == stcb->asoc.str_reset_seq_out) {
2791		srparam = sctp_find_stream_reset(stcb, seq, &chk);
2792		if (srparam) {
2793			stcb->asoc.str_reset_seq_out++;
2794			type = ntohs(srparam->ph.param_type);
2795			lparm_len = ntohs(srparam->ph.param_length);
2796			if (type == SCTP_STR_RESET_OUT_REQUEST) {
2797				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
2798				asoc->stream_reset_out_is_outstanding = 0;
2799				if (asoc->stream_reset_outstanding)
2800					asoc->stream_reset_outstanding--;
2801				if (action == SCTP_STREAM_RESET_PERFORMED) {
2802					/* do it */
2803					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
2804				} else {
2805					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
2806				}
2807			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
2808				/* Answered my request */
2809				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
2810				if (asoc->stream_reset_outstanding)
2811					asoc->stream_reset_outstanding--;
2812				if (action != SCTP_STREAM_RESET_PERFORMED) {
2813					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
2814				}
2815			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
2816				/**
2817				 * a) Adopt the new in tsn.
2818				 * b) reset the map
2819				 * c) Adopt the new out-tsn
2820				 */
2821				struct sctp_stream_reset_response_tsn *resp;
2822				struct sctp_forward_tsn_chunk fwdtsn;
2823				int abort_flag = 0;
2824
2825				if (respin == NULL) {
2826					/* huh ? */
2827					return (0);
2828				}
2829				if (action == SCTP_STREAM_RESET_PERFORMED) {
2830					resp = (struct sctp_stream_reset_response_tsn *)respin;
2831					asoc->stream_reset_outstanding--;
2832					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2833					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2834					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
2835					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2836					if (abort_flag) {
2837						return (1);
2838					}
2839					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
2840					stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2841					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
2842					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2843					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
2844					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
2845
2846					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2847					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2848
2849				}
2850			}
2851			/* get rid of the request and get the request flags */
2852			if (asoc->stream_reset_outstanding == 0) {
2853				sctp_clean_up_stream_reset(stcb);
2854			}
2855		}
2856	}
2857	return (0);
2858}
2859
2860static void
2861sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
2862    struct sctp_tmit_chunk *chk,
2863    struct sctp_stream_reset_in_request *req)
2864{
2865	uint32_t seq;
2866	int len, i;
2867	int number_entries;
2868	uint16_t temp;
2869
2870	/*
2871	 * The peer wants me to reset my outgoing stream sequences, provided
2872	 * seq_in is right.
2873	 */
2874	struct sctp_association *asoc = &stcb->asoc;
2875
2876	seq = ntohl(req->request_seq);
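	/*
	 * Only the expected sequence number is acted upon; a request that
	 * is one or two behind is treated as a retransmission whose
	 * response was lost, so the cached action is simply replayed, and
	 * any other sequence number is answered with
	 * SCTP_STREAM_RESET_BAD_SEQNO.
	 */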
2877	if (asoc->str_reset_seq_in == seq) {
2878		if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
2879			len = ntohs(req->ph.param_length);
2880			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
2881			for (i = 0; i < number_entries; i++) {
2882				temp = ntohs(req->list_of_streams[i]);
2883				req->list_of_streams[i] = temp;
2884			}
2885			/* move the reset action back one */
2886			asoc->last_reset_action[1] = asoc->last_reset_action[0];
2887			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2888			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
2889			    asoc->str_reset_seq_out,
2890			    seq, (asoc->sending_seq - 1));
2891			asoc->stream_reset_out_is_outstanding = 1;
2892			asoc->str_reset = chk;
2893			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
2894			stcb->asoc.stream_reset_outstanding++;
2895		} else {
2896			/* Can't do it, since we have sent one out */
2897			asoc->last_reset_action[1] = asoc->last_reset_action[0];
2898			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
2899			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2900		}
2901		asoc->str_reset_seq_in++;
2902	} else if (asoc->str_reset_seq_in - 1 == seq) {
2903		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2904	} else if (asoc->str_reset_seq_in - 2 == seq) {
2905		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
2906	} else {
2907		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2908	}
2909}
2910
2911static int
2912sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
2913    struct sctp_tmit_chunk *chk,
2914    struct sctp_stream_reset_tsn_request *req)
2915{
2916	/* reset all in and out and update the tsn */
2917	/*
2918	 * A) reset my str-seq's on in and out. B) Select a receive next,
2919	 * and set cum-ack to it. Also process this selected number as a
2920	 * fwd-tsn as well. C) set in the response my next sending seq.
2921	 */
2922	struct sctp_forward_tsn_chunk fwdtsn;
2923	struct sctp_association *asoc = &stcb->asoc;
2924	int abort_flag = 0;
2925	uint32_t seq;
2926
2927	seq = ntohl(req->request_seq);
2928	if (asoc->str_reset_seq_in == seq) {
2929		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2930		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2931		fwdtsn.ch.chunk_flags = 0;
2932		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
2933		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2934		if (abort_flag) {
2935			return (1);
2936		}
2937		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
2938		stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2939		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
2940		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2941		atomic_add_int(&stcb->asoc.sending_seq, 1);
2942		/* save off historical data for retrans */
2943		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
2944		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
2945		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
2946		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
2947
2948		sctp_add_stream_reset_result_tsn(chk,
2949		    ntohl(req->request_seq),
2950		    SCTP_STREAM_RESET_PERFORMED,
2951		    stcb->asoc.sending_seq,
2952		    stcb->asoc.mapping_array_base_tsn);
2953		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2954		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2955		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
2956		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2957
2958		asoc->str_reset_seq_in++;
2959	} else if (asoc->str_reset_seq_in - 1 == seq) {
2960		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
2961		    stcb->asoc.last_sending_seq[0],
2962		    stcb->asoc.last_base_tsnsent[0]
2963		    );
2964	} else if (asoc->str_reset_seq_in - 2 == seq) {
2965		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
2966		    stcb->asoc.last_sending_seq[1],
2967		    stcb->asoc.last_base_tsnsent[1]
2968		    );
2969	} else {
2970		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2971	}
2972	return (0);
2973}
2974
2975static void
2976sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
2977    struct sctp_tmit_chunk *chk,
2978    struct sctp_stream_reset_out_request *req)
2979{
2980	uint32_t seq, tsn;
2981	int number_entries, len;
2982	struct sctp_association *asoc = &stcb->asoc;
2983
2984	seq = ntohl(req->request_seq);
2985
2986	/* now if it's not a duplicate we process it */
2987	if (asoc->str_reset_seq_in == seq) {
2988		len = ntohs(req->ph.param_length);
2989		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
2990		/*
2991		 * the sender is resetting; handle the stream list. We must:
2992		 * a) verify whether we can do the reset now; if so, no problem.
2993		 * b) If we can't do the reset we must copy the request, and
2994		 * c) queue it, setting up the data-in processor to trigger it
2995		 * when needed and dequeue all the queued data.
2996		 */
2997		tsn = ntohl(req->send_reset_at_tsn);
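		/*
		 * send_reset_at_tsn names the point in the peer's TSN
		 * stream at which the reset takes effect: if our cumulative
		 * TSN has already reached it we can reset the incoming
		 * streams right away, otherwise the request is queued on
		 * resetHead until the outstanding TSNs have arrived.
		 */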
2998
2999		/* move the reset action back one */
3000		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3001		if ((tsn == asoc->cumulative_tsn) ||
3002		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3003			/* we can do it now */
3004			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3005			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3006			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3007		} else {
3008			/*
3009			 * we must queue it up and thus wait for the TSN's
3010			 * to arrive that are at or before tsn
3011			 */
3012			struct sctp_stream_reset_list *liste;
3013			int siz;
3014
3015			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3016			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3017			    siz, SCTP_M_STRESET);
3018			if (liste == NULL) {
3019				/* gak out of memory */
3020				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3021				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3022				return;
3023			}
3024			liste->tsn = tsn;
3025			liste->number_entries = number_entries;
3026			memcpy(&liste->req, req,
3027			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3028			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3029			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3030			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3031		}
3032		asoc->str_reset_seq_in++;
3033	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3034		/*
3035		 * one seq back, just echo back last action since my
3036		 * response was lost.
3037		 */
3038		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3039	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3040		/*
3041		 * two seq back, just echo back last action since my
3042		 * response was lost.
3043		 */
3044		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3045	} else {
3046		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3047	}
3048}
3049
3050static int
3051sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req)
3052{
3053	int chk_length, param_len, ptype;
3054	uint32_t seq;
3055	int num_req = 0;
3056	struct sctp_tmit_chunk *chk;
3057	struct sctp_chunkhdr *ch;
3058	struct sctp_paramhdr *ph;
3059	int ret_code = 0;
3060	int num_param = 0;
3061
3062	/* now it may be a reset or a reset-response */
3063	chk_length = ntohs(sr_req->ch.chunk_length);
3064
3065	/* setup for adding the response */
3066	sctp_alloc_a_chunk(stcb, chk);
3067	if (chk == NULL) {
3068		return (ret_code);
3069	}
3070	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3071	chk->rec.chunk_id.can_take_data = 0;
3072	chk->asoc = &stcb->asoc;
3073	chk->no_fr_allowed = 0;
3074	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3075	chk->book_size_scale = 0;
3076	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3077	if (chk->data == NULL) {
3078strres_nochunk:
3079		if (chk->data) {
3080			sctp_m_freem(chk->data);
3081			chk->data = NULL;
3082		}
3083		sctp_free_a_chunk(stcb, chk);
3084		return (ret_code);
3085	}
3086	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3087
3088	/* setup chunk parameters */
3089	chk->sent = SCTP_DATAGRAM_UNSENT;
3090	chk->snd_count = 0;
3091	chk->whoTo = stcb->asoc.primary_destination;
3092	atomic_add_int(&chk->whoTo->ref_count, 1);
3093
3094	ch = mtod(chk->data, struct sctp_chunkhdr *);
3095	ch->chunk_type = SCTP_STREAM_RESET;
3096	ch->chunk_flags = 0;
3097	ch->chunk_length = htons(chk->send_size);
3098	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3099	ph = (struct sctp_paramhdr *)&sr_req->sr_req;
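	/*
	 * A single STREAM-RESET chunk is built up as the response: every
	 * request parameter parsed in the loop below appends its result
	 * parameter through the sctp_add_stream_reset_* helpers, and if in
	 * the end nothing required a response the chunk is discarded again
	 * at strres_nochunk.
	 */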
3100	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3101		param_len = ntohs(ph->param_length);
3102		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3103			/* bad param */
3104			break;
3105		}
3106		ptype = ntohs(ph->param_type);
3107		num_param++;
3108		if (num_param > SCTP_MAX_RESET_PARAMS) {
3109			/* hit the max number of parameters already, sorry */
3110			break;
3111		}
3112		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3113			struct sctp_stream_reset_out_request *req_out;
3114
3115			req_out = (struct sctp_stream_reset_out_request *)ph;
3116			num_req++;
3117			if (stcb->asoc.stream_reset_outstanding) {
3118				seq = ntohl(req_out->response_seq);
3119				if (seq == stcb->asoc.str_reset_seq_out) {
3120					/* implicit ack */
3121					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3122				}
3123			}
3124			sctp_handle_str_reset_request_out(stcb, chk, req_out);
3125		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3126			struct sctp_stream_reset_in_request *req_in;
3127
3128			num_req++;
3129			req_in = (struct sctp_stream_reset_in_request *)ph;
3130			sctp_handle_str_reset_request_in(stcb, chk, req_in);
3131		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3132			struct sctp_stream_reset_tsn_request *req_tsn;
3133
3134			num_req++;
3135			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3136			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3137				ret_code = 1;
3138				goto strres_nochunk;
3139			}
3140			/* no more */
3141			break;
3142		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3143			struct sctp_stream_reset_response *resp;
3144			uint32_t result;
3145
3146			resp = (struct sctp_stream_reset_response *)ph;
3147			seq = ntohl(resp->response_seq);
3148			result = ntohl(resp->result);
3149			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3150				ret_code = 1;
3151				goto strres_nochunk;
3152			}
3153		} else {
3154			break;
3155		}
3156
3157		ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len));
3158		chk_length -= SCTP_SIZE32(param_len);
3159	}
3160	if (num_req == 0) {
3161		/* we have no response; free the stuff */
3162		goto strres_nochunk;
3163	}
3164	/* ok we have a chunk to link in */
3165	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3166	    chk,
3167	    sctp_next);
3168	stcb->asoc.ctrl_queue_cnt++;
3169	return (ret_code);
3170}
3171
3172/*
3173 * Handle a router's or endpoint's report of a packet loss. There are two
3174 * ways to handle this: either we get the whole packet and must dissect it
3175 * ourselves (possibly with truncation and/or corruption), or it is a
3176 * summary from a middle box that did the dissecting for us.
3177 */
3178static void
3179sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3180    struct sctp_tcb *stcb, struct sctp_nets *net)
3181{
3182	uint32_t bottle_bw, on_queue;
3183	uint16_t trunc_len;
3184	unsigned int chlen;
3185	unsigned int at;
3186	struct sctp_chunk_desc desc;
3187	struct sctp_chunkhdr *ch;
3188
3189	chlen = ntohs(cp->ch.chunk_length);
3190	chlen -= sizeof(struct sctp_pktdrop_chunk);
3191	/* XXX possible chlen underflow */
3192	if (chlen == 0) {
3193		ch = NULL;
3194		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3195			SCTP_STAT_INCR(sctps_pdrpbwrpt);
3196	} else {
3197		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3198		chlen -= sizeof(struct sctphdr);
3199		/* XXX possible chlen underflow */
3200		memset(&desc, 0, sizeof(desc));
3201	}
3202	trunc_len = (uint16_t) ntohs(cp->trunc_len);
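	/*
	 * trunc_len says how much of the dropped packet the reporter actually
	 * included: 0 means the whole packet should be present, otherwise only
	 * the first trunc_len bytes were copied and the loop below has to be
	 * careful about partially included chunks.
	 */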
3203	/* now the chunks themselves */
3204	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3205		desc.chunk_type = ch->chunk_type;
3206		/* get amount we need to move */
3207		at = ntohs(ch->chunk_length);
3208		if (at < sizeof(struct sctp_chunkhdr)) {
3209			/* corrupt chunk, maybe at the end? */
3210			SCTP_STAT_INCR(sctps_pdrpcrupt);
3211			break;
3212		}
3213		if (trunc_len == 0) {
3214			/* we are supposed to have all of it */
3215			if (at > chlen) {
3216				/* corrupt, skip it */
3217				SCTP_STAT_INCR(sctps_pdrpcrupt);
3218				break;
3219			}
3220		} else {
3221			/* is there enough of it left ? */
3222			if (desc.chunk_type == SCTP_DATA) {
3223				if (chlen < (sizeof(struct sctp_data_chunk) +
3224				    sizeof(desc.data_bytes))) {
3225					break;
3226				}
3227			} else {
3228				if (chlen < sizeof(struct sctp_chunkhdr)) {
3229					break;
3230				}
3231			}
3232		}
3233		if (desc.chunk_type == SCTP_DATA) {
3234			/* can we get out the tsn? */
3235			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3236				SCTP_STAT_INCR(sctps_pdrpmbda);
3237
3238			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3239				/* yep */
3240				struct sctp_data_chunk *dcp;
3241				uint8_t *ddp;
3242				unsigned int iii;
3243
3244				dcp = (struct sctp_data_chunk *)ch;
3245				ddp = (uint8_t *) (dcp + 1);
3246				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3247					desc.data_bytes[iii] = ddp[iii];
3248				}
3249				desc.tsn_ifany = dcp->dp.tsn;
3250			} else {
3251				/* nope we are done. */
3252				SCTP_STAT_INCR(sctps_pdrpnedat);
3253				break;
3254			}
3255		} else {
3256			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3257				SCTP_STAT_INCR(sctps_pdrpmbct);
3258		}
3259
3260		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3261			SCTP_STAT_INCR(sctps_pdrppdbrk);
3262			break;
3263		}
3264		if (SCTP_SIZE32(at) > chlen) {
3265			break;
3266		}
3267		chlen -= SCTP_SIZE32(at);
3268		if (chlen < sizeof(struct sctp_chunkhdr)) {
3269			/* done, none left */
3270			break;
3271		}
3272		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3273	}
3274	/* Now update any rwnd --- possibly */
3275	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3276		/* From a peer, we get a rwnd report */
3277		uint32_t a_rwnd;
3278
3279		SCTP_STAT_INCR(sctps_pdrpfehos);
3280
3281		bottle_bw = ntohl(cp->bottle_bw);
3282		on_queue = ntohl(cp->current_onq);
3283		if (bottle_bw && on_queue) {
3284			/* a rwnd report is in here */
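			/*
			 * Derive an implicit rwnd from the report:
			 * a_rwnd = bottle_bw - on_queue (clamped at zero),
			 * and our bytes still in flight are then subtracted
			 * to get the usable peers_rwnd.
			 */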
3285			if (bottle_bw > on_queue)
3286				a_rwnd = bottle_bw - on_queue;
3287			else
3288				a_rwnd = 0;
3289
3290			if (a_rwnd == 0)
3291				stcb->asoc.peers_rwnd = 0;
3292			else {
3293				if (a_rwnd > stcb->asoc.total_flight) {
3294					stcb->asoc.peers_rwnd =
3295					    a_rwnd - stcb->asoc.total_flight;
3296				} else {
3297					stcb->asoc.peers_rwnd = 0;
3298				}
3299				if (stcb->asoc.peers_rwnd <
3300				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3301					/* SWS sender side engages */
3302					stcb->asoc.peers_rwnd = 0;
3303				}
3304			}
3305		}
3306	} else {
3307		SCTP_STAT_INCR(sctps_pdrpfmbox);
3308	}
3309
3310	/* now middle boxes in sat networks get a cwnd bump */
3311	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3312	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
3313	    (stcb->asoc.sat_network)) {
3314		/*
3315		 * This is debatable, but for sat networks it makes sense.
3316		 * Note that if a T3 timer has gone off, we will prohibit any
3317		 * changes to cwnd until we exit the t3 loss recovery.
3318		 */
3319		uint32_t bw_avail;
3320		int rtt, incr;
3321
3322#ifdef SCTP_CWND_MONITOR
3323		int old_cwnd = net->cwnd;
3324
3325#endif
3326		/* need real RTT for this calc */
3327		rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
3328		/* get the bottleneck bandwidth */
3329		bottle_bw = ntohl(cp->bottle_bw);
3330		/* and what's on the queue */
3331		on_queue = ntohl(cp->current_onq);
3332		/*
3333		 * adjust the on-queue value if our flight is larger; the
3334		 * router may not yet have gotten the data "in-flight" to it
3335		 */
3336		if (on_queue < net->flight_size)
3337			on_queue = net->flight_size;
3338
3339		/* calculate the available space */
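		/*
		 * bw_avail is roughly the path's bandwidth-delay product in
		 * bytes: bottle_bw (bytes/sec) * rtt (assumed here to be in
		 * milliseconds) / 1000.  For example, 1 MB/s with a 100 ms
		 * RTT would allow about 100 kB to be in the pipe.
		 */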
3340		bw_avail = (bottle_bw * rtt) / 1000;
3341		if (bw_avail > bottle_bw) {
3342			/*
3343			 * Cap the growth to no more than the bottleneck.
3344			 * This can happen as RTT slides up due to queues.
3345			 * It also means that with more than a 1 second
3346			 * RTT and an empty queue you will be limited to
3347			 * bottle_bw per second, even if other points have
3348			 * half the RTT and you could get more out...
3349			 */
3350			bw_avail = bottle_bw;
3351		}
3352		if (on_queue > bw_avail) {
3353			/*
3354			 * No room for anything else; don't allow anything
3355			 * else to be "added to the fire".
3356			 */
3357			int seg_inflight, seg_onqueue, my_portion;
3358
3359			net->partial_bytes_acked = 0;
3360
3361			/* how much are we over queue size? */
3362			incr = on_queue - bw_avail;
3363			if (stcb->asoc.seen_a_sack_this_pkt) {
3364				/*
3365				 * undo any cwnd adjustment that the sack
3366				 * might have made
3367				 */
3368				net->cwnd = net->prev_cwnd;
3369			}
3370			/* Now how much of that is mine? */
3371			seg_inflight = net->flight_size / net->mtu;
3372			seg_onqueue = on_queue / net->mtu;
3373			my_portion = (incr * seg_inflight) / seg_onqueue;
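			/*
			 * Our share of the overage is proportional to how
			 * many of the queued segments are ours:
			 * my_portion = incr * seg_inflight / seg_onqueue.
			 */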
3374
3375			/* Have I made an adjustment already? */
3376			if (net->cwnd > net->flight_size) {
3377				/*
3378				 * for this flight I already made an
3379				 * adjustment; decrease my portion by the
3380				 * share covered by that previous adjustment.
3381				 */
3382				int diff_adj;
3383
3384				diff_adj = net->cwnd - net->flight_size;
3385				if (diff_adj > my_portion)
3386					my_portion = 0;
3387				else
3388					my_portion -= diff_adj;
3389			}
3390			/*
3391			 * back down to the previous cwnd (assume we have
3392			 * had a sack before this packet), minus whatever
3393			 * portion of the overage is my fault.
3394			 */
3395			net->cwnd -= my_portion;
3396
3397			/* we will NOT back down more than 1 MTU */
3398			if (net->cwnd <= net->mtu) {
3399				net->cwnd = net->mtu;
3400			}
3401			/* force into CA */
3402			net->ssthresh = net->cwnd - 1;
3403		} else {
3404			/*
3405			 * Take 1/4 of the space left, or a max-burst's
3406			 * worth, whichever is less.
3407			 */
3408			incr = min((bw_avail - on_queue) >> 2,
3409			    stcb->asoc.max_burst * net->mtu);
3410			net->cwnd += incr;
3411		}
3412		if (net->cwnd > bw_avail) {
3413			/* We can't exceed the pipe size */
3414			net->cwnd = bw_avail;
3415		}
3416		if (net->cwnd < net->mtu) {
3417			/* We always have 1 MTU */
3418			net->cwnd = net->mtu;
3419		}
3420#ifdef SCTP_CWND_MONITOR
3421		if (net->cwnd - old_cwnd != 0) {
3422			/* log only changes */
3423			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
3424			    SCTP_CWND_LOG_FROM_SAT);
3425		}
3426#endif
3427	}
3428}
3429
3430/*
3431 * Handles all control chunks in a packet.
3432 * inputs:  m      - mbuf chain, assumed to still contain the IP/SCTP header
3433 *          stcb   - the tcb found for this packet
3434 *          offset - offset into the mbuf chain to the first chunkhdr
3435 *          length - length of the complete packet
3436 * outputs: length - modified to remaining length after control processing
3437 *          netp   - modified to new sctp_nets after cookie-echo processing
3438 * returns: NULL to discard the packet (ie. no asoc, bad packet, ...), otherwise the tcb for this packet */
3439#ifdef __GNUC__
3440__attribute__((noinline))
3441#endif
3442	static struct sctp_tcb *
3443	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3444             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3445             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3446             uint32_t vrf_id)
3447{
3448	struct sctp_association *asoc;
3449	uint32_t vtag_in;
3450	int num_chunks = 0;	/* number of control chunks processed */
3451	uint32_t chk_length;
3452	int ret;
3453	int abort_no_unlock = 0;
3454
3455	/*
3456	 * How big should this be, and should it be alloc'd? Let's try the
3457	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3458	 * until we get into jumbograms and such..
3459	 */
3460	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3461	struct sctp_tcb *locked_tcb = stcb;
3462	int got_auth = 0;
3463	uint32_t auth_offset = 0, auth_len = 0;
3464	int auth_skipped = 0;
3465
3466	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3467	    iphlen, *offset, length, stcb);
3468
3469	/* validate chunk header length... */
3470	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3471		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3472		    ntohs(ch->chunk_length));
3473		if (locked_tcb) {
3474			SCTP_TCB_UNLOCK(locked_tcb);
3475		}
3476		return (NULL);
3477	}
3478	/*
3479	 * validate the verification tag
3480	 */
3481	vtag_in = ntohl(sh->v_tag);
3482
3483	if (locked_tcb) {
3484		SCTP_TCB_LOCK_ASSERT(locked_tcb);
3485	}
3486	if (ch->chunk_type == SCTP_INITIATION) {
3487		SCTPDBG(SCTP_DEBUG_INPUT1, "It's an INIT of len:%d vtag:%x\n",
3488		    ntohs(ch->chunk_length), vtag_in);
3489		if (vtag_in != 0) {
3490			/* protocol error- silently discard... */
3491			SCTP_STAT_INCR(sctps_badvtag);
3492			if (locked_tcb) {
3493				SCTP_TCB_UNLOCK(locked_tcb);
3494			}
3495			return (NULL);
3496		}
3497	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3498		/*
3499		 * If there is no stcb, skip the AUTH chunk and process it
3500		 * later, after a stcb is found (to validate that the lookup
3501		 * was valid).
3502		 */
3503		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3504		    (stcb == NULL) && !sctp_auth_disable) {
3505			/* save this chunk for later processing */
3506			auth_skipped = 1;
3507			auth_offset = *offset;
3508			auth_len = ntohs(ch->chunk_length);
3509
3510			/* (temporarily) move past this chunk */
3511			*offset += SCTP_SIZE32(auth_len);
3512			if (*offset >= length) {
3513				/* no more data left in the mbuf chain */
3514				*offset = length;
3515				if (locked_tcb) {
3516					SCTP_TCB_UNLOCK(locked_tcb);
3517				}
3518				return (NULL);
3519			}
3520			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3521			    sizeof(struct sctp_chunkhdr), chunk_buf);
3522		}
3523		if (ch == NULL) {
3524			/* Help */
3525			*offset = length;
3526			if (locked_tcb) {
3527				SCTP_TCB_UNLOCK(locked_tcb);
3528			}
3529			return (NULL);
3530		}
3531		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3532			goto process_control_chunks;
3533		}
3534		/*
3535		 * first check if it's an ASCONF with an unknown src addr;
3536		 * we need to look inside to find the association
3537		 */
3538		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3539			/* inp's refcount may be reduced */
3540			SCTP_INP_INCR_REF(inp);
3541
3542			stcb = sctp_findassociation_ep_asconf(m, iphlen,
3543			    *offset, sh, &inp, netp);
3544			if (stcb == NULL) {
3545				/*
3546				 * reduce inp's refcount if not reduced in
3547				 * sctp_findassociation_ep_asconf().
3548				 */
3549				SCTP_INP_DECR_REF(inp);
3550			}
3551			/* now go back and verify any auth chunk to be sure */
3552			if (auth_skipped && (stcb != NULL)) {
3553				struct sctp_auth_chunk *auth;
3554
3555				auth = (struct sctp_auth_chunk *)
3556				    sctp_m_getptr(m, auth_offset,
3557				    auth_len, chunk_buf);
3558				got_auth = 1;
3559				auth_skipped = 0;
3560				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3561				    auth_offset)) {
3562					/* auth HMAC failed so dump it */
3563					*offset = length;
3564					if (locked_tcb) {
3565						SCTP_TCB_UNLOCK(locked_tcb);
3566					}
3567					return (NULL);
3568				} else {
3569					/* remaining chunks are HMAC checked */
3570					stcb->asoc.authenticated = 1;
3571				}
3572			}
3573		}
3574		if (stcb == NULL) {
3575			/* no association, so it's out of the blue... */
3576			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3577			    vrf_id);
3578			*offset = length;
3579			if (locked_tcb) {
3580				SCTP_TCB_UNLOCK(locked_tcb);
3581			}
3582			return (NULL);
3583		}
3584		asoc = &stcb->asoc;
3585		/* ABORT and SHUTDOWN can use either v_tag... */
3586		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3587		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3588		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
3589			if ((vtag_in == asoc->my_vtag) ||
3590			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
3591			    (vtag_in == asoc->peer_vtag))) {
3592				/* this is valid */
3593			} else {
3594				/* drop this packet... */
3595				SCTP_STAT_INCR(sctps_badvtag);
3596				if (locked_tcb) {
3597					SCTP_TCB_UNLOCK(locked_tcb);
3598				}
3599				return (NULL);
3600			}
3601		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
3602			if (vtag_in != asoc->my_vtag) {
3603				/*
3604				 * this could be a stale SHUTDOWN-ACK or the
3605				 * peer never got the SHUTDOWN-COMPLETE and
3606				 * is still hung; we have started a new asoc
3607				 * but it won't complete until the shutdown
3608				 * is completed
3609				 */
3610				if (locked_tcb) {
3611					SCTP_TCB_UNLOCK(locked_tcb);
3612				}
3613				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
3614				    NULL, vrf_id);
3615				return (NULL);
3616			}
3617		} else {
3618			/* for all other chunks, vtag must match */
3619			if (vtag_in != asoc->my_vtag) {
3620				/* invalid vtag... */
3621				SCTPDBG(SCTP_DEBUG_INPUT3,
3622				    "invalid vtag: %xh, expect %xh\n",
3623				    vtag_in, asoc->my_vtag);
3624				SCTP_STAT_INCR(sctps_badvtag);
3625				if (locked_tcb) {
3626					SCTP_TCB_UNLOCK(locked_tcb);
3627				}
3628				*offset = length;
3629				return (NULL);
3630			}
3631		}
3632	}			/* end if !SCTP_COOKIE_ECHO */
3633	/*
3634	 * process all control chunks...
3635	 */
3636	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
3637	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
3638	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
3639		/* implied cookie-ack.. we must have lost the ack */
3640		stcb->asoc.overall_error_count = 0;
3641		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
3642		    *netp);
3643	}
3644process_control_chunks:
3645	while (IS_SCTP_CONTROL(ch)) {
3646		/* validate chunk length */
3647		chk_length = ntohs(ch->chunk_length);
3648		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
3649		    ch->chunk_type, chk_length);
3650		if (chk_length < sizeof(*ch) ||
3651		    (*offset + (int)chk_length) > length) {
3652			*offset = length;
3653			if (locked_tcb) {
3654				SCTP_TCB_UNLOCK(locked_tcb);
3655			}
3656			return (NULL);
3657		}
3658		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
3659		/*
3660		 * INIT-ACK only gets the init-ack "header" portion, because
3661		 * we don't have to process the peer's COOKIE. All others get
3662		 * a complete chunk.
3663		 */
3664		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
3665		    (ch->chunk_type == SCTP_INITIATION)) {
3666			/* get an init-ack chunk */
3667			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3668			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
3669			if (ch == NULL) {
3670				*offset = length;
3671				if (locked_tcb) {
3672					SCTP_TCB_UNLOCK(locked_tcb);
3673				}
3674				return (NULL);
3675			}
3676		} else if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3677			if (chk_length > sizeof(chunk_buf)) {
3678				/*
3679				 * use just the size of the chunk buffer so
3680				 * the front part of our cookie is intact.
3681				 * The rest of cookie processing should use
3682				 * the sctp_m_getptr() function to access
3683				 * the other parts.
3684				 */
3685				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3686				    (sizeof(chunk_buf) - 4),
3687				    chunk_buf);
3688				if (ch == NULL) {
3689					*offset = length;
3690					if (locked_tcb) {
3691						SCTP_TCB_UNLOCK(locked_tcb);
3692					}
3693					return (NULL);
3694				}
3695			} else {
3696				/* We can fit it all */
3697				goto all_fits;
3698			}
3699		} else {
3700			/* get a complete chunk... */
3701			if (chk_length > sizeof(chunk_buf)) {
3702				struct mbuf *oper;
3703				struct sctp_paramhdr *phdr;
3704
3705				oper = NULL;
3706				if (stcb) {
3707					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
3708					    0, M_DONTWAIT, 1, MT_DATA);
3709
3710					if (oper) {
3711						/* pre-reserve some space */
3712						SCTP_BUF_RESV_UF(oper, sizeof(struct sctp_chunkhdr));
3713						SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
3714						phdr = mtod(oper, struct sctp_paramhdr *);
3715						phdr->param_type = htons(SCTP_CAUSE_OUT_OF_RESC);
3716						phdr->param_length = htons(sizeof(struct sctp_paramhdr));
3717						sctp_queue_op_err(stcb, oper);
3718					}
3719				}
3720				if (locked_tcb) {
3721					SCTP_TCB_UNLOCK(locked_tcb);
3722				}
3723				return (NULL);
3724			}
3725	all_fits:
3726			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3727			    chk_length, chunk_buf);
3728			if (ch == NULL) {
3729				SCTP_PRINTF("sctp_process_control: Can't get all the data....\n");
3730				*offset = length;
3731				if (locked_tcb) {
3732					SCTP_TCB_UNLOCK(locked_tcb);
3733				}
3734				return (NULL);
3735			}
3736		}
3737		num_chunks++;
3738		/* Save off the last place we got a control from */
3739		if (stcb != NULL) {
3740			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
3741				/*
3742				 * allow last_control to be NULL if
3743				 * ASCONF... ASCONF processing will find the
3744				 * right net later
3745				 */
3746				if ((netp != NULL) && (*netp != NULL))
3747					stcb->asoc.last_control_chunk_from = *netp;
3748			}
3749		}
3750#ifdef SCTP_AUDITING_ENABLED
3751		sctp_audit_log(0xB0, ch->chunk_type);
3752#endif
3753
3754		/* check to see if this chunk required auth, but isn't */
3755		if ((stcb != NULL) && !sctp_auth_disable &&
3756		    sctp_auth_is_required_chunk(ch->chunk_type,
3757		    stcb->asoc.local_auth_chunks) &&
3758		    !stcb->asoc.authenticated) {
3759			/* "silently" ignore */
3760			SCTP_STAT_INCR(sctps_recvauthmissing);
3761			goto next_chunk;
3762		}
3763		switch (ch->chunk_type) {
3764		case SCTP_INITIATION:
3765			/* must be first and only chunk */
3766			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
3767			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3768				/* We are not interested anymore? */
3769				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3770					/*
3771					 * collision case where we are
3772					 * sending to them too
3773					 */
3774					;
3775				} else {
3776					if (locked_tcb) {
3777						SCTP_TCB_UNLOCK(locked_tcb);
3778					}
3779					*offset = length;
3780					return (NULL);
3781				}
3782			}
3783			if ((num_chunks > 1) ||
3784			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3785				*offset = length;
3786				if (locked_tcb) {
3787					SCTP_TCB_UNLOCK(locked_tcb);
3788				}
3789				return (NULL);
3790			}
3791			if ((stcb != NULL) &&
3792			    (SCTP_GET_STATE(&stcb->asoc) ==
3793			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
3794				sctp_send_shutdown_ack(stcb,
3795				    stcb->asoc.primary_destination);
3796				*offset = length;
3797				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3798				if (locked_tcb) {
3799					SCTP_TCB_UNLOCK(locked_tcb);
3800				}
3801				return (NULL);
3802			}
3803			if (netp) {
3804				sctp_handle_init(m, iphlen, *offset, sh,
3805				    (struct sctp_init_chunk *)ch, inp,
3806				    stcb, *netp, &abort_no_unlock, vrf_id);
3807			}
3808			if (abort_no_unlock)
3809				return (NULL);
3810
3811			*offset = length;
3812			if (locked_tcb) {
3813				SCTP_TCB_UNLOCK(locked_tcb);
3814			}
3815			return (NULL);
3816			break;
3817		case SCTP_INITIATION_ACK:
3818			/* must be first and only chunk */
3819			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
3820			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3821				/* We are not interested anymore */
3822				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3823					;
3824				} else {
3825					if (locked_tcb) {
3826						SCTP_TCB_UNLOCK(locked_tcb);
3827					}
3828					*offset = length;
3829					if (stcb) {
3830						sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3831					}
3832					return (NULL);
3833				}
3834			}
3835			if ((num_chunks > 1) ||
3836			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3837				*offset = length;
3838				if (locked_tcb) {
3839					SCTP_TCB_UNLOCK(locked_tcb);
3840				}
3841				return (NULL);
3842			}
3843			if ((netp) && (*netp)) {
3844				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
3845				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
3846			} else {
3847				ret = -1;
3848			}
3849			/*
3850			 * Special case, I must call the output routine to
3851			 * get the cookie echoed
3852			 */
3853			if (abort_no_unlock)
3854				return (NULL);
3855
3856			if ((stcb) && ret == 0)
3857				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3858			*offset = length;
3859			if (locked_tcb) {
3860				SCTP_TCB_UNLOCK(locked_tcb);
3861			}
3862			return (NULL);
3863			break;
3864		case SCTP_SELECTIVE_ACK:
3865			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
3866			SCTP_STAT_INCR(sctps_recvsacks);
3867			{
3868				struct sctp_sack_chunk *sack;
3869				int abort_now = 0;
3870				uint32_t a_rwnd, cum_ack;
3871				uint16_t num_seg;
3872				int nonce_sum_flag;
3873
3874				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
3875					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
3876					*offset = length;
3877					if (locked_tcb) {
3878						SCTP_TCB_UNLOCK(locked_tcb);
3879					}
3880					return (NULL);
3881				}
3882				sack = (struct sctp_sack_chunk *)ch;
3883				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
3884				cum_ack = ntohl(sack->sack.cum_tsn_ack);
3885				num_seg = ntohs(sack->sack.num_gap_ack_blks);
3886				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
3887				stcb->asoc.seen_a_sack_this_pkt = 1;
3888				if ((stcb->asoc.pr_sctp_cnt == 0) &&
3889				    (num_seg == 0) &&
3890				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
3891				    (cum_ack == stcb->asoc.last_acked_seq)) &&
3892				    (stcb->asoc.saw_sack_with_frags == 0) &&
3893				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
3894				    ) {
3895					/*
3896					 * We have a SIMPLE sack having no
3897					 * prior segments and data on sent
3898					 * queue to be acked.. Use the
3899					 * faster path sack processing. We
3900					 * also allow window update sacks
3901					 * with no missing segments to go
3902					 * this way too.
3903					 */
3904					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
3905					    &abort_now);
3906				} else {
3907					if (netp && *netp)
3908						sctp_handle_sack(sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
3909				}
3910				if (abort_now) {
3911					/* ABORT signal from sack processing */
3912					*offset = length;
3913					return (NULL);
3914				}
3915			}
3916			break;
3917		case SCTP_HEARTBEAT_REQUEST:
3918			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
3919			if ((stcb) && netp && *netp) {
3920				SCTP_STAT_INCR(sctps_recvheartbeat);
3921				sctp_send_heartbeat_ack(stcb, m, *offset,
3922				    chk_length, *netp);
3923
3924				/* He's alive so give him credit */
3925				stcb->asoc.overall_error_count = 0;
3926			}
3927			break;
3928		case SCTP_HEARTBEAT_ACK:
3929			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
3930			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
3931				/* It's not ours */
3932				*offset = length;
3933				if (locked_tcb) {
3934					SCTP_TCB_UNLOCK(locked_tcb);
3935				}
3936				return (NULL);
3937			}
3938			/* He's alive so give him credit */
3939			stcb->asoc.overall_error_count = 0;
3940			SCTP_STAT_INCR(sctps_recvheartbeatack);
3941			if (netp && *netp)
3942				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
3943				    stcb, *netp);
3944			break;
3945		case SCTP_ABORT_ASSOCIATION:
3946			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
3947			    stcb);
3948			if ((stcb) && netp && *netp)
3949				sctp_handle_abort((struct sctp_abort_chunk *)ch,
3950				    stcb, *netp);
3951			*offset = length;
3952			return (NULL);
3953			break;
3954		case SCTP_SHUTDOWN:
3955			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
3956			    stcb);
3957			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
3958				*offset = length;
3959				if (locked_tcb) {
3960					SCTP_TCB_UNLOCK(locked_tcb);
3961				}
3962				return (NULL);
3963
3964			}
3965			if (netp && *netp) {
3966				int abort_flag = 0;
3967
3968				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
3969				    stcb, *netp, &abort_flag);
3970				if (abort_flag) {
3971					*offset = length;
3972					return (NULL);
3973				}
3974			}
3975			break;
3976		case SCTP_SHUTDOWN_ACK:
3977			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
3978			if ((stcb) && (netp) && (*netp))
3979				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
3980			*offset = length;
3981			return (NULL);
3982			break;
3983
3984		case SCTP_OPERATION_ERROR:
3985			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
3986			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
3987
3988				*offset = length;
3989				return (NULL);
3990			}
3991			break;
3992		case SCTP_COOKIE_ECHO:
3993			SCTPDBG(SCTP_DEBUG_INPUT3,
3994			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
3995			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3996				;
3997			} else {
3998				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3999					/* We are not interested anymore */
4000					*offset = length;
4001					return (NULL);
4002				}
4003			}
4004			/*
4005			 * First, are we accepting? We do this again here
4006			 * since it is possible that a previous endpoint WAS
4007			 * listening, responded to an INIT-ACK, and then
4008			 * closed. We opened and bound.. and are now no
4009			 * longer listening.
4010			 */
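			/*
			 * A so_qlimit of 0 means there is no listen backlog,
			 * i.e. the socket is not accepting, so presumably
			 * only the retransmitted/one-to-one special case
			 * below may still be processed; everything else is
			 * aborted.
			 */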
4011			if (inp->sctp_socket->so_qlimit == 0) {
4012				if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4013					/*
4014					 * special case: is this a retran'd
4015					 * COOKIE-ECHO or a restarting assoc
4016					 * on a peeled-off or one-to-one
4017					 * style socket?
4018					 */
4019					goto process_cookie_anyway;
4020				}
4021				sctp_abort_association(inp, stcb, m, iphlen,
4022				    sh, NULL, vrf_id);
4023				*offset = length;
4024				return (NULL);
4025			} else if (inp->sctp_socket->so_qlimit) {
4026				/* we are accepting so check limits like TCP */
4027				if (inp->sctp_socket->so_qlen >
4028				    inp->sctp_socket->so_qlimit) {
4029					/* no space */
4030					struct mbuf *oper;
4031					struct sctp_paramhdr *phdr;
4032
4033					if (sctp_abort_if_one_2_one_hits_limit) {
4034						oper = NULL;
4035						oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4036						    0, M_DONTWAIT, 1, MT_DATA);
4037						if (oper) {
4038							SCTP_BUF_LEN(oper) =
4039							    sizeof(struct sctp_paramhdr);
4040							phdr = mtod(oper,
4041							    struct sctp_paramhdr *);
4042							phdr->param_type =
4043							    htons(SCTP_CAUSE_OUT_OF_RESC);
4044							phdr->param_length =
4045							    htons(sizeof(struct sctp_paramhdr));
4046						}
4047						sctp_abort_association(inp, stcb, m,
4048						    iphlen, sh, oper, vrf_id);
4049					}
4050					*offset = length;
4051					return (NULL);
4052				}
4053			}
4054	process_cookie_anyway:
4055			{
4056				struct mbuf *ret_buf;
4057				struct sctp_inpcb *linp;
4058
4059				if (stcb) {
4060					linp = NULL;
4061				} else {
4062					linp = inp;
4063				}
4064
4065				if (linp) {
4066					SCTP_ASOC_CREATE_LOCK(linp);
4067				}
4068				if (netp) {
4069					ret_buf =
4070					    sctp_handle_cookie_echo(m, iphlen,
4071					    *offset, sh,
4072					    (struct sctp_cookie_echo_chunk *)ch,
4073					    &inp, &stcb, netp,
4074					    auth_skipped,
4075					    auth_offset,
4076					    auth_len,
4077					    &locked_tcb,
4078					    vrf_id);
4079				} else {
4080					ret_buf = NULL;
4081				}
4082				if (linp) {
4083					SCTP_ASOC_CREATE_UNLOCK(linp);
4084				}
4085				if (ret_buf == NULL) {
4086					if (locked_tcb) {
4087						SCTP_TCB_UNLOCK(locked_tcb);
4088					}
4089					SCTPDBG(SCTP_DEBUG_INPUT3,
4090					    "GAK, null buffer\n");
4091					auth_skipped = 0;
4092					*offset = length;
4093					return (NULL);
4094				}
4095				/* if AUTH skipped, see if it verified... */
4096				if (auth_skipped) {
4097					got_auth = 1;
4098					auth_skipped = 0;
4099				}
4100				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4101					/*
4102					 * Restart the timer if we have
4103					 * pending data
4104					 */
4105					struct sctp_tmit_chunk *chk;
4106
4107					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4108					if (chk) {
4109						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4110						    stcb->sctp_ep, stcb,
4111						    chk->whoTo);
4112					}
4113				}
4114			}
4115			break;
4116		case SCTP_COOKIE_ACK:
4117			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4118			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4119				if (locked_tcb) {
4120					SCTP_TCB_UNLOCK(locked_tcb);
4121				}
4122				return (NULL);
4123			}
4124			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4125				/* We are not interested anymore */
4126				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4127					;
4128				} else if (stcb) {
4129					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4130					*offset = length;
4131					return (NULL);
4132				}
4133			}
4134			/* He's alive so give him credit */
4135			if ((stcb) && netp && *netp) {
4136				stcb->asoc.overall_error_count = 0;
4137				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4138			}
4139			break;
4140		case SCTP_ECN_ECHO:
4141			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4142			/* He's alive so give him credit */
4143			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4144				/* It's not ours */
4145				if (locked_tcb) {
4146					SCTP_TCB_UNLOCK(locked_tcb);
4147				}
4148				*offset = length;
4149				return (NULL);
4150			}
4151			if (stcb) {
4152				stcb->asoc.overall_error_count = 0;
4153				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4154				    stcb);
4155			}
4156			break;
4157		case SCTP_ECN_CWR:
4158			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4159			/* He's alive so give him credit */
4160			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4161				/* It's not ours */
4162				if (locked_tcb) {
4163					SCTP_TCB_UNLOCK(locked_tcb);
4164				}
4165				*offset = length;
4166				return (NULL);
4167			}
4168			if (stcb) {
4169				stcb->asoc.overall_error_count = 0;
4170				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4171			}
4172			break;
4173		case SCTP_SHUTDOWN_COMPLETE:
4174			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4175			/* must be first and only chunk */
4176			if ((num_chunks > 1) ||
4177			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4178				*offset = length;
4179				if (locked_tcb) {
4180					SCTP_TCB_UNLOCK(locked_tcb);
4181				}
4182				return (NULL);
4183			}
4184			if ((stcb) && netp && *netp) {
4185				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4186				    stcb, *netp);
4187			}
4188			*offset = length;
4189			return (NULL);
4190			break;
4191		case SCTP_ASCONF:
4192			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4193			/* He's alive so give him credit */
4194			if (stcb) {
4195				stcb->asoc.overall_error_count = 0;
4196				sctp_handle_asconf(m, *offset,
4197				    (struct sctp_asconf_chunk *)ch, stcb);
4198			}
4199			break;
4200		case SCTP_ASCONF_ACK:
4201			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4202			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4203				/* It's not ours */
4204				if (locked_tcb) {
4205					SCTP_TCB_UNLOCK(locked_tcb);
4206				}
4207				*offset = length;
4208				return (NULL);
4209			}
4210			if ((stcb) && netp && *netp) {
4211				/* He's alive so give him credit */
4212				stcb->asoc.overall_error_count = 0;
4213				sctp_handle_asconf_ack(m, *offset,
4214				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
4215			}
4216			break;
4217		case SCTP_FORWARD_CUM_TSN:
4218			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4219			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4220				/* It's not ours */
4221				if (locked_tcb) {
4222					SCTP_TCB_UNLOCK(locked_tcb);
4223				}
4224				*offset = length;
4225				return (NULL);
4226			}
4227			/* He's alive so give him credit */
4228			if (stcb) {
4229				int abort_flag = 0;
4230
4231				stcb->asoc.overall_error_count = 0;
4232				*fwd_tsn_seen = 1;
4233				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4234					/* We are not interested anymore */
4235					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
4236					*offset = length;
4237					return (NULL);
4238				}
4239				sctp_handle_forward_tsn(stcb,
4240				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag);
4241				if (abort_flag) {
4242					*offset = length;
4243					return (NULL);
4244				} else {
4245					stcb->asoc.overall_error_count = 0;
4246				}
4247
4248			}
4249			break;
4250		case SCTP_STREAM_RESET:
4251			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4252			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4253			    chk_length, chunk_buf);
4254			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4255				/* It's not ours */
4256				if (locked_tcb) {
4257					SCTP_TCB_UNLOCK(locked_tcb);
4258				}
4259				*offset = length;
4260				return (NULL);
4261			}
4262			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4263				/* We are not interested anymore */
4264				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4265				*offset = length;
4266				return (NULL);
4267			}
4268			if (stcb->asoc.peer_supports_strreset == 0) {
4269				/*
4270				 * hmm, peer should have announced this, but
4271				 * we will turn it on since he is sending us
4272				 * a stream reset.
4273				 */
4274				stcb->asoc.peer_supports_strreset = 1;
4275			}
4276			if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) {
4277				/* stop processing */
4278				*offset = length;
4279				return (NULL);
4280			}
4281			break;
4282		case SCTP_PACKET_DROPPED:
4283			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4284			/* re-get it all please */
4285			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4286				/* It's not ours */
4287				if (locked_tcb) {
4288					SCTP_TCB_UNLOCK(locked_tcb);
4289				}
4290				*offset = length;
4291				return (NULL);
4292			}
4293			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4294			    chk_length, chunk_buf);
4295
4296			if (ch && (stcb) && netp && (*netp)) {
4297				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4298				    stcb, *netp);
4299			}
4300			break;
4301
4302		case SCTP_AUTHENTICATION:
4303			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4304			if (sctp_auth_disable)
4305				goto unknown_chunk;
4306
4307			if (stcb == NULL) {
4308				/* save the first AUTH for later processing */
4309				if (auth_skipped == 0) {
4310					auth_offset = *offset;
4311					auth_len = chk_length;
4312					auth_skipped = 1;
4313				}
4314				/* skip this chunk (temporarily) */
4315				goto next_chunk;
4316			}
4317			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4318			    (chk_length > (sizeof(struct sctp_auth_chunk) +
4319			    SCTP_AUTH_DIGEST_LEN_MAX))) {
4320				/* It's not ours */
4321				if (locked_tcb) {
4322					SCTP_TCB_UNLOCK(locked_tcb);
4323				}
4324				*offset = length;
4325				return (NULL);
4326			}
4327			if (got_auth == 1) {
4328				/* skip this chunk... it's already auth'd */
4329				goto next_chunk;
4330			}
4331			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4332			    chk_length, chunk_buf);
4333			got_auth = 1;
4334			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4335			    m, *offset)) {
4336				/* auth HMAC failed so dump the packet */
4337				*offset = length;
4338				return (stcb);
4339			} else {
4340				/* remaining chunks are HMAC checked */
4341				stcb->asoc.authenticated = 1;
4342			}
4343			break;
4344
4345		default:
4346	unknown_chunk:
4347			/* it's an unknown chunk! */
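			/*
			 * The two high-order bits of the chunk type encode
			 * the action for unrecognized chunks (RFC 2960,
			 * section 3.2): 0x40 set means report the chunk in
			 * an ERROR, 0x80 set means skip it and keep
			 * processing; with 0x80 clear the rest of the packet
			 * is discarded.
			 */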
4348			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4349				struct mbuf *mm;
4350				struct sctp_paramhdr *phd;
4351
4352				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4353				    0, M_DONTWAIT, 1, MT_DATA);
4354				if (mm) {
4355					phd = mtod(mm, struct sctp_paramhdr *);
4356					/*
4357					 * We cheat and use param type since
4358					 * we did not bother to define an
4359					 * error cause struct. They are the
4360					 * same basic format with different
4361					 * names.
4362					 */
4363					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4364					phd->param_length = htons(chk_length + sizeof(*phd));
4365					SCTP_BUF_LEN(mm) = sizeof(*phd);
4366					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4367					    M_DONTWAIT);
4368					if (SCTP_BUF_NEXT(mm)) {
4369						sctp_queue_op_err(stcb, mm);
4370					} else {
4371						sctp_m_freem(mm);
4372					}
4373				}
4374			}
4375			if ((ch->chunk_type & 0x80) == 0) {
4376				/* discard this packet */
4377				*offset = length;
4378				return (stcb);
4379			}	/* else skip this bad chunk and continue... */
4380			break;
4381		}		/* switch (ch->chunk_type) */
4382
4383
4384next_chunk:
4385		/* get the next chunk */
4386		*offset += SCTP_SIZE32(chk_length);
4387		if (*offset >= length) {
4388			/* no more data left in the mbuf chain */
4389			break;
4390		}
4391		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4392		    sizeof(struct sctp_chunkhdr), chunk_buf);
4393		if (ch == NULL) {
4394			if (locked_tcb) {
4395				SCTP_TCB_UNLOCK(locked_tcb);
4396			}
4397			*offset = length;
4398			return (NULL);
4399		}
4400	}			/* while */
4401	return (stcb);
4402}
4403
4404
4405/*
4406 * Process the ECN bits: we have something set, so we must look to see if it
4407 * is ECN(0), ECN(1), or CE
4408 */
4409static __inline void
4410sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4411    uint8_t ecn_bits)
4412{
4413	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4414		;
4415	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4416		/*
4417		 * we only add to the nonce sum for ECT1; ECT0 does not
4418		 * change the NS bit (which we have not yet found a way to
4419		 * send).
4420		 */
4421
4422		/* ECN Nonce stuff */
4423		stcb->asoc.receiver_nonce_sum++;
4424		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
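		/*
		 * The nonce sum is kept as a single bit (masked to the NS
		 * bit) and echoed back in SACKs; in the ECN nonce scheme this
		 * is meant to let the sender detect a receiver that conceals
		 * marked or lost segments.
		 */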
4425
4426		/*
4427		 * Drag up the last_echo point if cumack is larger since we
4428		 * don't want the point falling way behind by more than
4429		 * 2^31 and then having it be incorrect.
4430		 */
4431		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4432		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4433			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4434		}
4435	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4436		/*
4437		 * Drag up the last_echo point if cumack is larger since we
4438		 * don't want the point falling way behind by more than
4439		 * 2^31 and then having it be incorrect.
4440		 */
4441		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4442		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4443			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4444		}
4445	}
4446}
4447
4448static __inline void
4449sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4450    uint32_t high_tsn, uint8_t ecn_bits)
4451{
4452	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4453		/*
4454		 * we possibly must notify the sender that a congestion
4455		 * window reduction is in order. We do this by adding an ECNE
4456		 * chunk to the output chunk queue. The incoming CWR will
4457		 * remove this chunk.
4458		 */
4459		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4460		    MAX_TSN)) {
4461			/* Yep, we need to add an ECNE */
4462			sctp_send_ecn_echo(stcb, net, high_tsn);
4463			stcb->asoc.last_echo_tsn = high_tsn;
4464		}
4465	}
4466}
4467
4468/*
4469 * common input chunk processing (v4 and v6)
4470 */
4471void
4472sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
4473    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
4474    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
4475    uint8_t ecn_bits, uint32_t vrf_id)
4476{
4477	/*
4478	 * Control chunk processing
4479	 */
4480	uint32_t high_tsn;
4481	int fwd_tsn_seen = 0, data_processed = 0;
4482	struct mbuf *m = *mm;
4483	int abort_flag = 0;
4484	int un_sent;
4485
4486	SCTP_STAT_INCR(sctps_recvdatagrams);
4487#ifdef SCTP_AUDITING_ENABLED
4488	sctp_audit_log(0xE0, 1);
4489	sctp_auditing(0, inp, stcb, net);
4490#endif
4491
4492	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
4493	    m, iphlen, offset);
4494
4495	if (stcb) {
4496		/* always clear this before beginning a packet */
4497		stcb->asoc.authenticated = 0;
4498		stcb->asoc.seen_a_sack_this_pkt = 0;
4499	}
4500	if (IS_SCTP_CONTROL(ch)) {
4501		/* process the control portion of the SCTP packet */
4502		/* sa_ignore NO_NULL_CHK */
4503		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
4504		    inp, stcb, &net, &fwd_tsn_seen, vrf_id);
4505		if (stcb) {
4506			/*
4507			 * This covers us if the cookie-echo was there and
4508			 * it changes our INP.
4509			 */
4510			inp = stcb->sctp_ep;
4511		}
4512	} else {
4513		/*
4514		 * no control chunks, so pre-process DATA chunks (these
4515		 * checks are taken care of by control processing)
4516		 */
4517
4518		/*
4519		 * if DATA only packet, and auth is required, then punt...
4520		 * can't have authenticated without any AUTH (control)
4521		 * chunks
4522		 */
4523		if ((stcb != NULL) && !sctp_auth_disable &&
4524		    sctp_auth_is_required_chunk(SCTP_DATA,
4525		    stcb->asoc.local_auth_chunks)) {
4526			/* "silently" ignore */
4527			SCTP_STAT_INCR(sctps_recvauthmissing);
4528			SCTP_TCB_UNLOCK(stcb);
4529			return;
4530		}
4531		if (stcb == NULL) {
4532			/* out of the blue DATA chunk */
4533			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4534			    vrf_id);
4535			return;
4536		}
4537		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
4538			/* v_tag mismatch! */
4539			SCTP_STAT_INCR(sctps_badvtag);
4540			SCTP_TCB_UNLOCK(stcb);
4541			return;
4542		}
4543	}
4544
4545	if (stcb == NULL) {
4546		/*
4547		 * no valid TCB for this packet, or we found it's a bad
4548		 * packet while processing control, or we're done with this
4549		 * packet (done or skip rest of data), so we drop it...
4550		 */
4551		return;
4552	}
4553	/*
4554	 * DATA chunk processing
4555	 */
4556	/* plow through the data chunks while length > offset */
4557
4558	/*
4559	 * Rest should be DATA only.  Check authentication state if AUTH for
4560	 * DATA is required.
4561	 */
4562	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
4563	    sctp_auth_is_required_chunk(SCTP_DATA,
4564	    stcb->asoc.local_auth_chunks) &&
4565	    !stcb->asoc.authenticated) {
4566		/* "silently" ignore */
4567		SCTP_STAT_INCR(sctps_recvauthmissing);
4568		SCTPDBG(SCTP_DEBUG_AUTH1,
4569		    "Data chunk requires AUTH, skipped\n");
4570		goto trigger_send;
4571	}
4572	if (length > offset) {
4573		int retval;
4574
4575		/*
4576		 * First check to make sure our state is correct. We would
4577		 * not get here unless we really did have a tag, so we don't
4578		 * abort if this happens, just dump the chunk silently.
4579		 */
4580		switch (SCTP_GET_STATE(&stcb->asoc)) {
4581		case SCTP_STATE_COOKIE_ECHOED:
4582			/*
4583			 * we consider that data with a valid tag in this
4584			 * state shows us the cookie-ack was lost. Imply it
4585			 * was there.
4586			 */
4587			stcb->asoc.overall_error_count = 0;
4588			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
4589			break;
4590		case SCTP_STATE_COOKIE_WAIT:
4591			/*
4592			 * We consider OOTB any data sent during asoc setup.
4593			 */
4594			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4595			    vrf_id);
4596			SCTP_TCB_UNLOCK(stcb);
4597			return;
4598			break;
4599		case SCTP_STATE_EMPTY:	/* should not happen */
4600		case SCTP_STATE_INUSE:	/* should not happen */
4601		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
4602		case SCTP_STATE_SHUTDOWN_ACK_SENT:
4603		default:
4604			SCTP_TCB_UNLOCK(stcb);
4605			return;
4606			break;
4607		case SCTP_STATE_OPEN:
4608		case SCTP_STATE_SHUTDOWN_SENT:
4609			break;
4610		}
4611		/* take care of ECN, part 1. */
4612		if (stcb->asoc.ecn_allowed &&
4613		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4614			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
4615		}
4616		/* plow through the data chunks while length > offset */
4617		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
4618		    inp, stcb, net, &high_tsn);
4619		if (retval == 2) {
4620			/*
4621			 * The association aborted, NO UNLOCK needed since
4622			 * the association is destroyed.
4623			 */
4624			return;
4625		}
4626		data_processed = 1;
4627		if (retval == 0) {
4628			/* take care of ecn part 2. */
4629			if (stcb->asoc.ecn_allowed &&
4630			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4631				sctp_process_ecn_marked_b(stcb, net, high_tsn,
4632				    ecn_bits);
4633			}
4634		}
4635		/*
4636		 * Anything important needs to have been m_copy'ed in
4637		 * process_data
4638		 */
4639	}
4640	if ((data_processed == 0) && (fwd_tsn_seen)) {
4641		int was_a_gap = 0;
4642
4643		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
4644		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
4645			/* there was a gap before this data was processed */
4646			was_a_gap = 1;
4647		}
4648		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
4649		if (abort_flag) {
4650			/* Again, we aborted so NO UNLOCK needed */
4651			return;
4652		}
4653	}
4654	/* trigger send of any chunks in queue... */
4655trigger_send:
4656#ifdef SCTP_AUDITING_ENABLED
4657	sctp_audit_log(0xE0, 2);
4658	sctp_auditing(1, inp, stcb, net);
4659#endif
4660	SCTPDBG(SCTP_DEBUG_INPUT1,
4661	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
4662	    stcb->asoc.peers_rwnd,
4663	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
4664	    stcb->asoc.total_flight);
4665	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
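	/*
	 * Kick the output path if control chunks are queued, or if there is
	 * unsent data and either the peer has window, or the window is zero
	 * with nothing in flight (presumably so a probe can be sent).
	 */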
4666
4667	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
4668	    ((un_sent) &&
4669	    (stcb->asoc.peers_rwnd > 0 ||
4670	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
4671		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
4672		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
4673		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
4674	}
4675#ifdef SCTP_AUDITING_ENABLED
4676	sctp_audit_log(0xE0, 3);
4677	sctp_auditing(2, inp, stcb, net);
4678#endif
4679	SCTP_TCB_UNLOCK(stcb);
4680	return;
4681}
4682
4683
4684
4685void
4686sctp_input(i_pak, off)
4687	struct mbuf *i_pak;
4688	int off;
4689
4690{
4691#ifdef SCTP_MBUF_LOGGING
4692	struct mbuf *mat;
4693
4694#endif
4695	struct mbuf *m;
4696	int iphlen;
4697	uint32_t vrf_id = 0;
4698	uint8_t ecn_bits;
4699	struct ip *ip;
4700	struct sctphdr *sh;
4701	struct sctp_inpcb *inp = NULL;
4702
4703	uint32_t check, calc_check;
4704	struct sctp_nets *net;
4705	struct sctp_tcb *stcb = NULL;
4706	struct sctp_chunkhdr *ch;
4707	int refcount_up = 0;
4708	int length, mlen, offset;
4709
4710
4711	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
4712		SCTP_RELEASE_PKT(i_pak);
4713		return;
4714	}
4715	mlen = SCTP_HEADER_LEN(i_pak);
4716	iphlen = off;
4717	m = SCTP_HEADER_TO_CHAIN(i_pak);
4718
4719	net = NULL;
4720	SCTP_STAT_INCR(sctps_recvpackets);
4721	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
4722
4723
4724#ifdef SCTP_MBUF_LOGGING
4725	/* Log in any input mbufs */
4726	mat = m;
4727	while (mat) {
4728		if (SCTP_BUF_IS_EXTENDED(mat)) {
4729			sctp_log_mb(mat, SCTP_MBUF_INPUT);
4730		}
4731		mat = SCTP_BUF_NEXT(mat);
4732	}
4733#endif
4734#ifdef  SCTP_PACKET_LOGGING
4735	sctp_packet_log(m, mlen);
4736#endif
4737	/*
4738	 * Must take out the iphlen, since mlen expects this (only affects
4739	 * the loopback case)
4740	 */
4741	mlen -= iphlen;
4742
4743	/*
4744	 * Get IP, SCTP, and first chunk header together in first mbuf.
4745	 */
4746	ip = mtod(m, struct ip *);
4747	offset = iphlen + sizeof(*sh) + sizeof(*ch);
4748	if (SCTP_BUF_LEN(m) < offset) {
4749		if ((m = m_pullup(m, offset)) == 0) {
4750			SCTP_STAT_INCR(sctps_hdrops);
4751			return;
4752		}
4753		ip = mtod(m, struct ip *);
4754	}
4755	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
4756	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
4757
4758	/* SCTP does not allow broadcasts or multicasts */
4759	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
4760		goto bad;
4761	}
4762	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
4763		/*
4764		 * We only look at broadcast if it is a front state; all
4765		 * others we will not have a tcb for anyway.
4766		 */
4767		goto bad;
4768	}
4769	/* validate SCTP checksum */
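	/*
	 * Checksum verification: save the received checksum, zero the field,
	 * recompute over the datagram with sctp_calculate_sum(), and compare.
	 * On a mismatch a packet-dropped report may be sent to the peer
	 * before the packet is discarded.
	 */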
4770	if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
4771		/*
4772		 * we do NOT validate things from the loopback if the sysctl
4773		 * is set to 1.
4774		 */
4775		check = sh->checksum;	/* save incoming checksum */
4776		if ((check == 0) && (sctp_no_csum_on_loopback)) {
4777			/*
4778			 * special hook for where we got a local address
4779			 * somehow routed across a non IFT_LOOP type
4780			 * interface
4781			 */
4782			if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
4783				goto sctp_skip_csum_4;
4784		}
4785		sh->checksum = 0;	/* prepare for calc */
4786		calc_check = sctp_calculate_sum(m, &mlen, iphlen);
4787		if (calc_check != check) {
4788			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
4789			    calc_check, check, m, mlen, iphlen);
4790
4791			stcb = sctp_findassociation_addr(m, iphlen,
4792			    offset - sizeof(*ch),
4793			    sh, ch, &inp, &net,
4794			    vrf_id);
4795			if ((inp) && (stcb)) {
4796				sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
4797				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
4798			} else if ((inp != NULL) && (stcb == NULL)) {
4799				refcount_up = 1;
4800			}
4801			SCTP_STAT_INCR(sctps_badsum);
4802			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
4803			goto bad;
4804		}
4805		sh->checksum = calc_check;
4806	}
4807sctp_skip_csum_4:
4808	/* destination port of 0 is illegal, based on RFC2960. */
4809	if (sh->dest_port == 0) {
4810		SCTP_STAT_INCR(sctps_hdrops);
4811		goto bad;
4812	}
4813	/* validate mbuf chain length with IP payload length */
4814	if (mlen < (ip->ip_len - iphlen)) {
4815		SCTP_STAT_INCR(sctps_hdrops);
4816		goto bad;
4817	}
4818	/*
4819	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
4820	 * IP/SCTP/first chunk header...
4821	 */
4822	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
4823	    sh, ch, &inp, &net, vrf_id);
4824	/* inp's ref-count increased && stcb locked */
4825	if (inp == NULL) {
4826		struct sctp_init_chunk *init_chk, chunk_buf;
4827
4828		SCTP_STAT_INCR(sctps_noport);
4829#ifdef ICMP_BANDLIM
4830		/*
4831		 * we use the bandwidth limiting to protect against sending
4832		 * too many ABORTS all at once. In this case these count the
4833		 * same as an ICMP message.
4834		 */
4835		if (badport_bandlim(0) < 0)
4836			goto bad;
4837#endif				/* ICMP_BANDLIM */
4838		SCTPDBG(SCTP_DEBUG_INPUT1,
4839		    "Sending an ABORT from packet entry!\n");
4840		if (ch->chunk_type == SCTP_INITIATION) {
4841			/*
4842			 * we do a trick here to get the INIT tag: dig in
4843			 * and get the tag from the INIT and put it in the
4844			 * common header.
4845			 */
4846			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4847			    iphlen + sizeof(*sh), sizeof(*init_chk),
4848			    (uint8_t *) & chunk_buf);
4849			if (init_chk != NULL)
4850				sh->v_tag = init_chk->init.initiate_tag;
4851		}
4852		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4853			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
4854			goto bad;
4855		}
4856		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
4857			goto bad;
4858		}
4859		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
4860			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
4861		goto bad;
4862	} else if (stcb == NULL) {
4863		refcount_up = 1;
4864	}
4865#ifdef IPSEC
4866	/*
4867	 * I very much doubt any of the IPSEC stuff will work but I have no
4868	 * idea, so I will leave it in place.
4869	 */
4870
4871	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
4872		ipsecstat.in_polvio++;
4873		SCTP_STAT_INCR(sctps_hdrops);
4874		goto bad;
4875	}
4876#endif				/* IPSEC */
4877
4878	/*
4879	 * common chunk processing
4880	 */
4881	length = ip->ip_len + iphlen;
4882	offset -= sizeof(struct sctp_chunkhdr);
4883
4884	ecn_bits = ip->ip_tos;
4885
4886	/* sa_ignore NO_NULL_CHK */
4887	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
4888	    inp, stcb, net, ecn_bits, vrf_id);
4889	/* inp's ref-count reduced && stcb unlocked */
4890	if (m) {
4891		sctp_m_freem(m);
4892	}
4893	if ((inp) && (refcount_up)) {
4894		/* reduce ref-count */
4895		SCTP_INP_WLOCK(inp);
4896		SCTP_INP_DECR_REF(inp);
4897		SCTP_INP_WUNLOCK(inp);
4898	}
4899	return;
4900bad:
4901	if (stcb) {
4902		SCTP_TCB_UNLOCK(stcb);
4903	}
4904	if ((inp) && (refcount_up)) {
4905		/* reduce ref-count */
4906		SCTP_INP_WLOCK(inp);
4907		SCTP_INP_DECR_REF(inp);
4908		SCTP_INP_WUNLOCK(inp);
4909	}
4910	if (m) {
4911		sctp_m_freem(m);
4912	}
4913	return;
4914}
4915