sctp_input.c revision 171133
1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 171133 2007-07-01 11:41:27Z gnn $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48
49
50
51static void
52sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
53{
54	struct sctp_nets *net;
55
56	/*
57	 * This now not only stops all cookie timers it also stops any INIT
58	 * timers as well. This will make sure that the timers are stopped
59	 * in all collision cases.
60	 */
61	SCTP_TCB_LOCK_ASSERT(stcb);
62	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
63		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
64			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
65			    stcb->sctp_ep,
66			    stcb,
67			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
68		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
69			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
70			    stcb->sctp_ep,
71			    stcb,
72			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
73		}
74	}
75}
76
77/* INIT handler */
78static void
79sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
80    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
81    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
82{
83	struct sctp_init *init;
84	struct mbuf *op_err;
85	uint32_t init_limit;
86
87	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
88	    stcb);
89	op_err = NULL;
90	init = &cp->init;
91	/* First are we accepting? */
92	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
93		SCTPDBG(SCTP_DEBUG_INPUT2,
94		    "sctp_handle_init: Abort, so_qlimit:%d\n",
95		    inp->sctp_socket->so_qlimit);
96		/*
97		 * FIX ME ?? What about TCP model and we have a
98		 * match/restart case?
99		 */
100		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
101		    vrf_id);
102		if (stcb)
103			*abort_no_unlock = 1;
104		return;
105	}
106	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
107		/* Invalid length */
108		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
109		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
110		    vrf_id);
111		if (stcb)
112			*abort_no_unlock = 1;
113		return;
114	}
115	/* validate parameters */
116	if (init->initiate_tag == 0) {
117		/* protocol error... send abort */
118		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
119		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
120		    vrf_id);
121		if (stcb)
122			*abort_no_unlock = 1;
123		return;
124	}
125	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
126		/* invalid parameter... send abort */
127		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
128		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
129		    vrf_id);
130		return;
131	}
132	if (init->num_inbound_streams == 0) {
133		/* protocol error... send abort */
134		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
135		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
136		    vrf_id);
137		if (stcb)
138			*abort_no_unlock = 1;
139		return;
140	}
141	if (init->num_outbound_streams == 0) {
142		/* protocol error... send abort */
143		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
144		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
145		    vrf_id);
146		if (stcb)
147			*abort_no_unlock = 1;
148		return;
149	}
150	init_limit = offset + ntohs(cp->ch.chunk_length);
151	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
152	    init_limit)) {
153		/* auth parameter(s) error... send abort */
154		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
155		if (stcb)
156			*abort_no_unlock = 1;
157		return;
158	}
159	/* send an INIT-ACK w/cookie */
160	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
161	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id);
162}
163
164/*
165 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
166 */
167static int
168sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
169    struct sctp_nets *net)
170{
171	struct sctp_init *init;
172	struct sctp_association *asoc;
173	struct sctp_nets *lnet;
174	unsigned int i;
175
176	init = &cp->init;
177	asoc = &stcb->asoc;
178	/* save off parameters */
179	asoc->peer_vtag = ntohl(init->initiate_tag);
180	asoc->peers_rwnd = ntohl(init->a_rwnd);
181	if (TAILQ_FIRST(&asoc->nets)) {
182		/* update any ssthresh's that may have a default */
183		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
184			lnet->ssthresh = asoc->peers_rwnd;
185
186			if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
187				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
188			}
189		}
190	}
191	SCTP_TCB_SEND_LOCK(stcb);
192	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
193		unsigned int newcnt;
194		struct sctp_stream_out *outs;
195		struct sctp_stream_queue_pending *sp;
196
197		/* cut back on number of streams */
198		newcnt = ntohs(init->num_inbound_streams);
199		/* This if is probably not needed but I am cautious */
200		if (asoc->strmout) {
201			/* First make sure no data chunks are trapped */
202			for (i = newcnt; i < asoc->pre_open_streams; i++) {
203				outs = &asoc->strmout[i];
204				sp = TAILQ_FIRST(&outs->outqueue);
205				while (sp) {
206					TAILQ_REMOVE(&outs->outqueue, sp,
207					    next);
208					asoc->stream_queue_cnt--;
209					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
210					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
211					    sp);
212					if (sp->data) {
213						sctp_m_freem(sp->data);
214						sp->data = NULL;
215					}
216					sctp_free_remote_addr(sp->net);
217					sp->net = NULL;
218					/* Free the chunk */
219					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
220					    sp, stcb);
221
222					sctp_free_a_strmoq(stcb, sp);
223					/* sa_ignore FREED_MEMORY */
224					sp = TAILQ_FIRST(&outs->outqueue);
225				}
226			}
227		}
228		/* cut back the count and abandon the upper streams */
229		asoc->pre_open_streams = newcnt;
230	}
231	SCTP_TCB_SEND_UNLOCK(stcb);
232	asoc->streamoutcnt = asoc->pre_open_streams;
233	/* init tsn's */
234	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
235	if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
236		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
237	}
238	/* This is the next one we expect */
239	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
240
241	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
242	asoc->cumulative_tsn = asoc->asconf_seq_in;
243	asoc->last_echo_tsn = asoc->asconf_seq_in;
244	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
245	/* open the requested streams */
246
247	if (asoc->strmin != NULL) {
248		/* Free the old ones */
249		struct sctp_queued_to_read *ctl;
250
251		for (i = 0; i < asoc->streamincnt; i++) {
252			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
253			while (ctl) {
254				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
255				sctp_free_remote_addr(ctl->whoFrom);
256				sctp_m_freem(ctl->data);
257				ctl->data = NULL;
258				sctp_free_a_readq(stcb, ctl);
259				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
260			}
261		}
262		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
263	}
264	asoc->streamincnt = ntohs(init->num_outbound_streams);
265	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
266		asoc->streamincnt = MAX_SCTP_STREAMS;
267	}
268	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
269	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
270	if (asoc->strmin == NULL) {
271		/* we didn't get memory for the streams! */
272		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
273		return (-1);
274	}
275	for (i = 0; i < asoc->streamincnt; i++) {
276		asoc->strmin[i].stream_no = i;
277		asoc->strmin[i].last_sequence_delivered = 0xffff;
278		/*
279		 * U-stream ranges will be set when the cookie is unpacked.
280		 * Or for the INIT sender they are un set (if pr-sctp not
281		 * supported) when the INIT-ACK arrives.
282		 */
283		TAILQ_INIT(&asoc->strmin[i].inqueue);
284		asoc->strmin[i].delivery_started = 0;
285	}
286	/*
287	 * load_address_from_init will put the addresses into the
288	 * association when the COOKIE is processed or the INIT-ACK is
289	 * processed. Both types of COOKIE's existing and new call this
290	 * routine. It will remove addresses that are no longer in the
291	 * association (for the restarting case where addresses are
292	 * removed). Up front when the INIT arrives we will discard it if it
293	 * is a restart and new addresses have been added.
294	 */
295	/* sa_ignore MEMLEAK */
296	return (0);
297}
298
299/*
300 * INIT-ACK message processing/consumption returns value < 0 on error
301 */
302static int
303sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
304    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
305    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
306{
307	struct sctp_association *asoc;
308	struct mbuf *op_err;
309	int retval, abort_flag;
310	uint32_t initack_limit;
311
312	/* First verify that we have no illegal param's */
313	abort_flag = 0;
314	op_err = NULL;
315
316	op_err = sctp_arethere_unrecognized_parameters(m,
317	    (offset + sizeof(struct sctp_init_chunk)),
318	    &abort_flag, (struct sctp_chunkhdr *)cp);
319	if (abort_flag) {
320		/* Send an abort and notify peer */
321		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err);
322		*abort_no_unlock = 1;
323		return (-1);
324	}
325	asoc = &stcb->asoc;
326	/* process the peer's parameters in the INIT-ACK */
327	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
328	if (retval < 0) {
329		return (retval);
330	}
331	initack_limit = offset + ntohs(cp->ch.chunk_length);
332	/* load all addresses */
333	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
334	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
335	    NULL))) {
336		/* Huh, we should abort */
337		SCTPDBG(SCTP_DEBUG_INPUT1,
338		    "Load addresses from INIT causes an abort %d\n",
339		    retval);
340		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
341		    NULL, 0);
342		*abort_no_unlock = 1;
343		return (-1);
344	}
345	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
346	    stcb->asoc.local_hmacs);
347	if (op_err) {
348		sctp_queue_op_err(stcb, op_err);
349		/* queuing will steal away the mbuf chain to the out queue */
350		op_err = NULL;
351	}
352	/* extract the cookie and queue it to "echo" it back... */
353	stcb->asoc.overall_error_count = 0;
354	net->error_count = 0;
355
356	/*
357	 * Cancel the INIT timer, We do this first before queueing the
358	 * cookie. We always cancel at the primary to assue that we are
359	 * canceling the timer started by the INIT which always goes to the
360	 * primary.
361	 */
362	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
363	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
364
365	/* calculate the RTO */
366	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);
367
368	retval = sctp_send_cookie_echo(m, offset, stcb, net);
369	if (retval < 0) {
370		/*
371		 * No cookie, we probably should send a op error. But in any
372		 * case if there is no cookie in the INIT-ACK, we can
373		 * abandon the peer, its broke.
374		 */
375		if (retval == -3) {
376			/* We abort with an error of missing mandatory param */
377			op_err =
378			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
379			if (op_err) {
380				/*
381				 * Expand beyond to include the mandatory
382				 * param cookie
383				 */
384				struct sctp_inv_mandatory_param *mp;
385
386				SCTP_BUF_LEN(op_err) =
387				    sizeof(struct sctp_inv_mandatory_param);
388				mp = mtod(op_err,
389				    struct sctp_inv_mandatory_param *);
390				/* Subtract the reserved param */
391				mp->length =
392				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
393				mp->num_param = htonl(1);
394				mp->param = htons(SCTP_STATE_COOKIE);
395				mp->resv = 0;
396			}
397			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
398			    sh, op_err, 0);
399			*abort_no_unlock = 1;
400		}
401		return (retval);
402	}
403	return (0);
404}
405
406static void
407sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
408    struct sctp_tcb *stcb, struct sctp_nets *net)
409{
410	struct sockaddr_storage store;
411	struct sockaddr_in *sin;
412	struct sockaddr_in6 *sin6;
413	struct sctp_nets *r_net;
414	struct timeval tv;
415
416	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
417		/* Invalid length */
418		return;
419	}
420	sin = (struct sockaddr_in *)&store;
421	sin6 = (struct sockaddr_in6 *)&store;
422
423	memset(&store, 0, sizeof(store));
424	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
425	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
426		sin->sin_family = cp->heartbeat.hb_info.addr_family;
427		sin->sin_len = cp->heartbeat.hb_info.addr_len;
428		sin->sin_port = stcb->rport;
429		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
430		    sizeof(sin->sin_addr));
431	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
432	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
433		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
434		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
435		sin6->sin6_port = stcb->rport;
436		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
437		    sizeof(sin6->sin6_addr));
438	} else {
439		return;
440	}
441	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
442	if (r_net == NULL) {
443		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
444		return;
445	}
446	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
447	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
448	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
449		/*
450		 * If the its a HB and it's random value is correct when can
451		 * confirm the destination.
452		 */
453		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
454		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
455			stcb->asoc.primary_destination = r_net;
456			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
457			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
458			r_net = TAILQ_FIRST(&stcb->asoc.nets);
459			if (r_net != stcb->asoc.primary_destination) {
460				/*
461				 * first one on the list is NOT the primary
462				 * sctp_cmpaddr() is much more efficent if
463				 * the primary is the first on the list,
464				 * make it so.
465				 */
466				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
467				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
468			}
469		}
470		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
471		    stcb, 0, (void *)r_net);
472	}
473	r_net->error_count = 0;
474	r_net->hb_responded = 1;
475	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
476	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
477	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
478		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
479		r_net->dest_state |= SCTP_ADDR_REACHABLE;
480		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
481		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
482		/* now was it the primary? if so restore */
483		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
484			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
485		}
486	}
487	/* Now lets do a RTO with this */
488	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
489}
490
491static void
492sctp_handle_abort(struct sctp_abort_chunk *cp,
493    struct sctp_tcb *stcb, struct sctp_nets *net)
494{
495	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
496	if (stcb == NULL)
497		return;
498
499	/* stop any receive timers */
500	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
501	/* notify user of the abort and clean up... */
502	sctp_abort_notification(stcb, 0);
503	/* free the tcb */
504	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
505	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
506	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
507		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
508	}
509#ifdef SCTP_ASOCLOG_OF_TSNS
510	sctp_print_out_track_log(stcb);
511#endif
512	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
513	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
514}
515
516static void
517sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
518    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
519{
520	struct sctp_association *asoc;
521	int some_on_streamwheel;
522
523	SCTPDBG(SCTP_DEBUG_INPUT2,
524	    "sctp_handle_shutdown: handling SHUTDOWN\n");
525	if (stcb == NULL)
526		return;
527	asoc = &stcb->asoc;
528	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
529	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
530		return;
531	}
532	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
533		/* Shutdown NOT the expected size */
534		return;
535	} else {
536		sctp_update_acked(stcb, cp, net, abort_flag);
537	}
538	if (asoc->control_pdapi) {
539		/*
540		 * With a normal shutdown we assume the end of last record.
541		 */
542		SCTP_INP_READ_LOCK(stcb->sctp_ep);
543		asoc->control_pdapi->end_added = 1;
544		asoc->control_pdapi->pdapi_aborted = 1;
545		asoc->control_pdapi = NULL;
546		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
547		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
548	}
549	/* goto SHUTDOWN_RECEIVED state to block new requests */
550	if (stcb->sctp_socket) {
551		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
552		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
553		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
554			asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
555			/*
556			 * notify upper layer that peer has initiated a
557			 * shutdown
558			 */
559			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);
560
561			/* reset time */
562			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
563		}
564	}
565	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
566		/*
567		 * stop the shutdown timer, since we WILL move to
568		 * SHUTDOWN-ACK-SENT.
569		 */
570		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
571	}
572	/* Now are we there yet? */
573	some_on_streamwheel = 0;
574	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
575		/* Check to see if some data queued */
576		struct sctp_stream_out *outs;
577
578		TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
579			if (!TAILQ_EMPTY(&outs->outqueue)) {
580				some_on_streamwheel = 1;
581				break;
582			}
583		}
584	}
585	if (!TAILQ_EMPTY(&asoc->send_queue) ||
586	    !TAILQ_EMPTY(&asoc->sent_queue) ||
587	    some_on_streamwheel) {
588		/* By returning we will push more data out */
589		return;
590	} else {
591		/* no outstanding data to send, so move on... */
592		/* send SHUTDOWN-ACK */
593		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
594		/* move to SHUTDOWN-ACK-SENT state */
595		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
596		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
597			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
598		}
599		asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
600
601		/* start SHUTDOWN timer */
602		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
603		    stcb, net);
604	}
605}
606
607static void
608sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
609    struct sctp_tcb *stcb, struct sctp_nets *net)
610{
611	struct sctp_association *asoc;
612
613	SCTPDBG(SCTP_DEBUG_INPUT2,
614	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
615	if (stcb == NULL)
616		return;
617
618	asoc = &stcb->asoc;
619	/* process according to association state */
620	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
621	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
622		/* unexpected SHUTDOWN-ACK... so ignore... */
623		SCTP_TCB_UNLOCK(stcb);
624		return;
625	}
626	if (asoc->control_pdapi) {
627		/*
628		 * With a normal shutdown we assume the end of last record.
629		 */
630		SCTP_INP_READ_LOCK(stcb->sctp_ep);
631		asoc->control_pdapi->end_added = 1;
632		asoc->control_pdapi->pdapi_aborted = 1;
633		asoc->control_pdapi = NULL;
634		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
635		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
636	}
637	/* are the queues empty? */
638	if (!TAILQ_EMPTY(&asoc->send_queue) ||
639	    !TAILQ_EMPTY(&asoc->sent_queue) ||
640	    !TAILQ_EMPTY(&asoc->out_wheel)) {
641		sctp_report_all_outbound(stcb, 0);
642	}
643	/* stop the timer */
644	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
645	/* send SHUTDOWN-COMPLETE */
646	sctp_send_shutdown_complete(stcb, net);
647	/* notify upper layer protocol */
648	if (stcb->sctp_socket) {
649		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
650		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
651		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
652			/* Set the connected flag to disconnected */
653			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
654		}
655	}
656	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
657	/* free the TCB but first save off the ep */
658	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
659	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
660}
661
662/*
663 * Skip past the param header and then we will find the chunk that caused the
664 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
665 * our peer must be broken.
666 */
667static void
668sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
669    struct sctp_nets *net)
670{
671	struct sctp_chunkhdr *chk;
672
673	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
674	switch (chk->chunk_type) {
675	case SCTP_ASCONF_ACK:
676	case SCTP_ASCONF:
677		sctp_asconf_cleanup(stcb, net);
678		break;
679	case SCTP_FORWARD_CUM_TSN:
680		stcb->asoc.peer_supports_prsctp = 0;
681		break;
682	default:
683		SCTPDBG(SCTP_DEBUG_INPUT2,
684		    "Peer does not support chunk type %d(%x)??\n",
685		    chk->chunk_type, (uint32_t) chk->chunk_type);
686		break;
687	}
688}
689
690/*
691 * Skip past the param header and then we will find the param that caused the
692 * problem.  There are a number of param's in a ASCONF OR the prsctp param
693 * these will turn of specific features.
694 */
695static void
696sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
697{
698	struct sctp_paramhdr *pbad;
699
700	pbad = phdr + 1;
701	switch (ntohs(pbad->param_type)) {
702		/* pr-sctp draft */
703	case SCTP_PRSCTP_SUPPORTED:
704		stcb->asoc.peer_supports_prsctp = 0;
705		break;
706	case SCTP_SUPPORTED_CHUNK_EXT:
707		break;
708		/* draft-ietf-tsvwg-addip-sctp */
709	case SCTP_ECN_NONCE_SUPPORTED:
710		stcb->asoc.peer_supports_ecn_nonce = 0;
711		stcb->asoc.ecn_nonce_allowed = 0;
712		stcb->asoc.ecn_allowed = 0;
713		break;
714	case SCTP_ADD_IP_ADDRESS:
715	case SCTP_DEL_IP_ADDRESS:
716	case SCTP_SET_PRIM_ADDR:
717		stcb->asoc.peer_supports_asconf = 0;
718		break;
719	case SCTP_SUCCESS_REPORT:
720	case SCTP_ERROR_CAUSE_IND:
721		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
722		SCTPDBG(SCTP_DEBUG_INPUT2,
723		    "Turning off ASCONF to this strange peer\n");
724		stcb->asoc.peer_supports_asconf = 0;
725		break;
726	default:
727		SCTPDBG(SCTP_DEBUG_INPUT2,
728		    "Peer does not support param type %d(%x)??\n",
729		    pbad->param_type, (uint32_t) pbad->param_type);
730		break;
731	}
732}
733
734static int
735sctp_handle_error(struct sctp_chunkhdr *ch,
736    struct sctp_tcb *stcb, struct sctp_nets *net)
737{
738	int chklen;
739	struct sctp_paramhdr *phdr;
740	uint16_t error_type;
741	uint16_t error_len;
742	struct sctp_association *asoc;
743
744	int adjust;
745
746	/* parse through all of the errors and process */
747	asoc = &stcb->asoc;
748	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
749	    sizeof(struct sctp_chunkhdr));
750	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
751	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
752		/* Process an Error Cause */
753		error_type = ntohs(phdr->param_type);
754		error_len = ntohs(phdr->param_length);
755		if ((error_len > chklen) || (error_len == 0)) {
756			/* invalid param length for this param */
757			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
758			    chklen, error_len);
759			return (0);
760		}
761		switch (error_type) {
762		case SCTP_CAUSE_INVALID_STREAM:
763		case SCTP_CAUSE_MISSING_PARAM:
764		case SCTP_CAUSE_INVALID_PARAM:
765		case SCTP_CAUSE_NO_USER_DATA:
766			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
767			    error_type);
768			break;
769		case SCTP_CAUSE_STALE_COOKIE:
770			/*
771			 * We only act if we have echoed a cookie and are
772			 * waiting.
773			 */
774			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
775				int *p;
776
777				p = (int *)((caddr_t)phdr + sizeof(*phdr));
778				/* Save the time doubled */
779				asoc->cookie_preserve_req = ntohl(*p) << 1;
780				asoc->stale_cookie_count++;
781				if (asoc->stale_cookie_count >
782				    asoc->max_init_times) {
783					sctp_abort_notification(stcb, 0);
784					/* now free the asoc */
785					sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
786					return (-1);
787				}
788				/* blast back to INIT state */
789				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
790				asoc->state |= SCTP_STATE_COOKIE_WAIT;
791
792				sctp_stop_all_cookie_timers(stcb);
793				sctp_send_initiate(stcb->sctp_ep, stcb);
794			}
795			break;
796		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
797			/*
798			 * Nothing we can do here, we don't do hostname
799			 * addresses so if the peer does not like my IPv6
800			 * (or IPv4 for that matter) it does not matter. If
801			 * they don't support that type of address, they can
802			 * NOT possibly get that packet type... i.e. with no
803			 * IPv6 you can't recieve a IPv6 packet. so we can
804			 * safely ignore this one. If we ever added support
805			 * for HOSTNAME Addresses, then we would need to do
806			 * something here.
807			 */
808			break;
809		case SCTP_CAUSE_UNRECOG_CHUNK:
810			sctp_process_unrecog_chunk(stcb, phdr, net);
811			break;
812		case SCTP_CAUSE_UNRECOG_PARAM:
813			sctp_process_unrecog_param(stcb, phdr);
814			break;
815		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
816			/*
817			 * We ignore this since the timer will drive out a
818			 * new cookie anyway and there timer will drive us
819			 * to send a SHUTDOWN_COMPLETE. We can't send one
820			 * here since we don't have their tag.
821			 */
822			break;
823		case SCTP_CAUSE_DELETING_LAST_ADDR:
824		case SCTP_CAUSE_RESOURCE_SHORTAGE:
825		case SCTP_CAUSE_DELETING_SRC_ADDR:
826			/*
827			 * We should NOT get these here, but in a
828			 * ASCONF-ACK.
829			 */
830			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
831			    error_type);
832			break;
833		case SCTP_CAUSE_OUT_OF_RESC:
834			/*
835			 * And what, pray tell do we do with the fact that
836			 * the peer is out of resources? Not really sure we
837			 * could do anything but abort. I suspect this
838			 * should have came WITH an abort instead of in a
839			 * OP-ERROR.
840			 */
841			break;
842		default:
843			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
844			    error_type);
845			break;
846		}
847		adjust = SCTP_SIZE32(error_len);
848		chklen -= adjust;
849		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
850	}
851	return (0);
852}
853
854static int
855sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
856    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
857    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
858{
859	struct sctp_init_ack *init_ack;
860	int *state;
861	struct mbuf *op_err;
862
863	SCTPDBG(SCTP_DEBUG_INPUT2,
864	    "sctp_handle_init_ack: handling INIT-ACK\n");
865
866	if (stcb == NULL) {
867		SCTPDBG(SCTP_DEBUG_INPUT2,
868		    "sctp_handle_init_ack: TCB is null\n");
869		return (-1);
870	}
871	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
872		/* Invalid length */
873		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
874		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
875		    op_err, 0);
876		*abort_no_unlock = 1;
877		return (-1);
878	}
879	init_ack = &cp->init;
880	/* validate parameters */
881	if (init_ack->initiate_tag == 0) {
882		/* protocol error... send an abort */
883		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
884		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
885		    op_err, 0);
886		*abort_no_unlock = 1;
887		return (-1);
888	}
889	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
890		/* protocol error... send an abort */
891		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
892		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
893		    op_err, 0);
894		*abort_no_unlock = 1;
895		return (-1);
896	}
897	if (init_ack->num_inbound_streams == 0) {
898		/* protocol error... send an abort */
899		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
900		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
901		    op_err, 0);
902		*abort_no_unlock = 1;
903		return (-1);
904	}
905	if (init_ack->num_outbound_streams == 0) {
906		/* protocol error... send an abort */
907		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
908		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
909		    op_err, 0);
910		*abort_no_unlock = 1;
911		return (-1);
912	}
913	/* process according to association state... */
914	state = &stcb->asoc.state;
915	switch (*state & SCTP_STATE_MASK) {
916	case SCTP_STATE_COOKIE_WAIT:
917		/* this is the expected state for this chunk */
918		/* process the INIT-ACK parameters */
919		if (stcb->asoc.primary_destination->dest_state &
920		    SCTP_ADDR_UNCONFIRMED) {
921			/*
922			 * The primary is where we sent the INIT, we can
923			 * always consider it confirmed when the INIT-ACK is
924			 * returned. Do this before we load addresses
925			 * though.
926			 */
927			stcb->asoc.primary_destination->dest_state &=
928			    ~SCTP_ADDR_UNCONFIRMED;
929			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
930			    stcb, 0, (void *)stcb->asoc.primary_destination);
931		}
932		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
933		    net, abort_no_unlock, vrf_id) < 0) {
934			/* error in parsing parameters */
935			return (-1);
936		}
937		/* update our state */
938		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
939		if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
940			*state = SCTP_STATE_COOKIE_ECHOED |
941			    SCTP_STATE_SHUTDOWN_PENDING;
942		} else {
943			*state = SCTP_STATE_COOKIE_ECHOED;
944		}
945
946		/* reset the RTO calc */
947		stcb->asoc.overall_error_count = 0;
948		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
949		/*
950		 * collapse the init timer back in case of a exponential
951		 * backoff
952		 */
953		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
954		    stcb, net);
955		/*
956		 * the send at the end of the inbound data processing will
957		 * cause the cookie to be sent
958		 */
959		break;
960	case SCTP_STATE_SHUTDOWN_SENT:
961		/* incorrect state... discard */
962		break;
963	case SCTP_STATE_COOKIE_ECHOED:
964		/* incorrect state... discard */
965		break;
966	case SCTP_STATE_OPEN:
967		/* incorrect state... discard */
968		break;
969	case SCTP_STATE_EMPTY:
970	case SCTP_STATE_INUSE:
971	default:
972		/* incorrect state... discard */
973		return (-1);
974		break;
975	}
976	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
977	return (0);
978}
979
980
981/*
982 * handle a state cookie for an existing association m: input packet mbuf
983 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
984 * "split" mbuf and the cookie signature does not exist offset: offset into
985 * mbuf to the cookie-echo chunk
986 */
987static struct sctp_tcb *
988sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
989    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
990    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
991    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
992    uint32_t vrf_id)
993{
994	struct sctp_association *asoc;
995	struct sctp_init_chunk *init_cp, init_buf;
996	struct sctp_init_ack_chunk *initack_cp, initack_buf;
997	int chk_length;
998	int init_offset, initack_offset, i;
999	int retval;
1000	int spec_flag = 0;
1001	uint32_t how_indx;
1002
1003	/* I know that the TCB is non-NULL from the caller */
1004	asoc = &stcb->asoc;
1005	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1006		if (asoc->cookie_how[how_indx] == 0)
1007			break;
1008	}
1009	if (how_indx < sizeof(asoc->cookie_how)) {
1010		asoc->cookie_how[how_indx] = 1;
1011	}
1012	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1013		/* SHUTDOWN came in after sending INIT-ACK */
1014		struct mbuf *op_err;
1015		struct sctp_paramhdr *ph;
1016
1017		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1018		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1019		    0, M_DONTWAIT, 1, MT_DATA);
1020		if (op_err == NULL) {
1021			/* FOOBAR */
1022			return (NULL);
1023		}
1024		/* pre-reserve some space */
1025		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1026		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1027		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1028		/* Set the len */
1029		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1030		ph = mtod(op_err, struct sctp_paramhdr *);
1031		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1032		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1033		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1034		    vrf_id);
1035		if (how_indx < sizeof(asoc->cookie_how))
1036			asoc->cookie_how[how_indx] = 2;
1037		return (NULL);
1038	}
1039	/*
1040	 * find and validate the INIT chunk in the cookie (peer's info) the
1041	 * INIT should start after the cookie-echo header struct (chunk
1042	 * header, state cookie header struct)
1043	 */
1044	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1045
1046	init_cp = (struct sctp_init_chunk *)
1047	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1048	    (uint8_t *) & init_buf);
1049	if (init_cp == NULL) {
1050		/* could not pull a INIT chunk in cookie */
1051		return (NULL);
1052	}
1053	chk_length = ntohs(init_cp->ch.chunk_length);
1054	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1055		return (NULL);
1056	}
1057	/*
1058	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1059	 * INIT-ACK follows the INIT chunk
1060	 */
1061	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1062	initack_cp = (struct sctp_init_ack_chunk *)
1063	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1064	    (uint8_t *) & initack_buf);
1065	if (initack_cp == NULL) {
1066		/* could not pull INIT-ACK chunk in cookie */
1067		return (NULL);
1068	}
1069	chk_length = ntohs(initack_cp->ch.chunk_length);
1070	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1071		return (NULL);
1072	}
1073	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1074	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1075		/*
1076		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1077		 * to get into the OPEN state
1078		 */
1079		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1080#ifdef INVARIANTS
1081			panic("Case D and non-match seq?");
1082#else
1083			SCTP_PRINTF("Case D, seq non-match %x vs %x?\n",
1084			    ntohl(initack_cp->init.initial_tsn),
1085			    asoc->init_seq_number);
1086#endif
1087		}
1088		switch SCTP_GET_STATE
1089			(asoc) {
1090		case SCTP_STATE_COOKIE_WAIT:
1091		case SCTP_STATE_COOKIE_ECHOED:
1092			/*
1093			 * INIT was sent but got a COOKIE_ECHO with the
1094			 * correct tags... just accept it...but we must
1095			 * process the init so that we can make sure we have
1096			 * the right seq no's.
1097			 */
1098			/* First we must process the INIT !! */
1099			retval = sctp_process_init(init_cp, stcb, net);
1100			if (retval < 0) {
1101				if (how_indx < sizeof(asoc->cookie_how))
1102					asoc->cookie_how[how_indx] = 3;
1103				return (NULL);
1104			}
1105			/* we have already processed the INIT so no problem */
1106			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1107			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1108			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1109			/* update current state */
1110			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1111				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1112			else
1113				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1114			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1115				asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1116				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1117				    stcb->sctp_ep, stcb, asoc->primary_destination);
1118
1119			} else {
1120				/* if ok, move to OPEN state */
1121				asoc->state = SCTP_STATE_OPEN;
1122			}
1123			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1124			sctp_stop_all_cookie_timers(stcb);
1125			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1126			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1127			    (inp->sctp_socket->so_qlimit == 0)
1128			    ) {
1129				/*
1130				 * Here is where collision would go if we
1131				 * did a connect() and instead got a
1132				 * init/init-ack/cookie done before the
1133				 * init-ack came back..
1134				 */
1135				stcb->sctp_ep->sctp_flags |=
1136				    SCTP_PCB_FLAGS_CONNECTED;
1137				soisconnected(stcb->sctp_ep->sctp_socket);
1138			}
1139			/* notify upper layer */
1140			*notification = SCTP_NOTIFY_ASSOC_UP;
1141			/*
1142			 * since we did not send a HB make sure we don't
1143			 * double things
1144			 */
1145			net->hb_responded = 1;
1146			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1147			    &cookie->time_entered);
1148
1149			if (stcb->asoc.sctp_autoclose_ticks &&
1150			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1151				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1152				    inp, stcb, NULL);
1153			}
1154			break;
1155		default:
1156			/*
1157			 * we're in the OPEN state (or beyond), so peer must
1158			 * have simply lost the COOKIE-ACK
1159			 */
1160			break;
1161			}	/* end switch */
1162		sctp_stop_all_cookie_timers(stcb);
1163		/*
1164		 * We ignore the return code here.. not sure if we should
1165		 * somehow abort.. but we do have an existing asoc. This
1166		 * really should not fail.
1167		 */
1168		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1169		    init_offset + sizeof(struct sctp_init_chunk),
1170		    initack_offset, sh, init_src)) {
1171			if (how_indx < sizeof(asoc->cookie_how))
1172				asoc->cookie_how[how_indx] = 4;
1173			return (NULL);
1174		}
1175		/* respond with a COOKIE-ACK */
1176		sctp_toss_old_cookies(stcb, asoc);
1177		sctp_send_cookie_ack(stcb);
1178		if (how_indx < sizeof(asoc->cookie_how))
1179			asoc->cookie_how[how_indx] = 5;
1180		return (stcb);
1181	}
1182	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1183	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1184	    cookie->tie_tag_my_vtag == 0 &&
1185	    cookie->tie_tag_peer_vtag == 0) {
1186		/*
1187		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1188		 */
1189		if (how_indx < sizeof(asoc->cookie_how))
1190			asoc->cookie_how[how_indx] = 6;
1191		return (NULL);
1192	}
1193	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
1194	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
1195	    init_cp->init.initiate_tag == 0)) {
1196		/*
1197		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1198		 * should be ok, re-accept peer info
1199		 */
1200		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1201			/*
1202			 * Extension of case C. If we hit this, then the
1203			 * random number generator returned the same vtag
1204			 * when we first sent our INIT-ACK and when we later
1205			 * sent our INIT. The side with the seq numbers that
1206			 * are different will be the one that normnally
1207			 * would have hit case C. This in effect "extends"
1208			 * our vtags in this collision case to be 64 bits.
1209			 * The same collision could occur aka you get both
1210			 * vtag and seq number the same twice in a row.. but
1211			 * is much less likely. If it did happen then we
1212			 * would proceed through and bring up the assoc.. we
1213			 * may end up with the wrong stream setup however..
1214			 * which would be bad.. but there is no way to
1215			 * tell.. until we send on a stream that does not
1216			 * exist :-)
1217			 */
1218			if (how_indx < sizeof(asoc->cookie_how))
1219				asoc->cookie_how[how_indx] = 7;
1220
1221			return (NULL);
1222		}
1223		if (how_indx < sizeof(asoc->cookie_how))
1224			asoc->cookie_how[how_indx] = 8;
1225		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1226		sctp_stop_all_cookie_timers(stcb);
1227		/*
1228		 * since we did not send a HB make sure we don't double
1229		 * things
1230		 */
1231		net->hb_responded = 1;
1232		if (stcb->asoc.sctp_autoclose_ticks &&
1233		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1234			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1235			    NULL);
1236		}
1237		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1238		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1239
1240		/* Note last_cwr_tsn? where is this used? */
1241		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1242		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1243			/*
1244			 * Ok the peer probably discarded our data (if we
1245			 * echoed a cookie+data). So anything on the
1246			 * sent_queue should be marked for retransmit, we
1247			 * may not get something to kick us so it COULD
1248			 * still take a timeout to move these.. but it can't
1249			 * hurt to mark them.
1250			 */
1251			struct sctp_tmit_chunk *chk;
1252
1253			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1254				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1255					chk->sent = SCTP_DATAGRAM_RESEND;
1256					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1257					spec_flag++;
1258				}
1259			}
1260
1261		}
1262		/* process the INIT info (peer's info) */
1263		retval = sctp_process_init(init_cp, stcb, net);
1264		if (retval < 0) {
1265			if (how_indx < sizeof(asoc->cookie_how))
1266				asoc->cookie_how[how_indx] = 9;
1267			return (NULL);
1268		}
1269		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1270		    init_offset + sizeof(struct sctp_init_chunk),
1271		    initack_offset, sh, init_src)) {
1272			if (how_indx < sizeof(asoc->cookie_how))
1273				asoc->cookie_how[how_indx] = 10;
1274			return (NULL);
1275		}
1276		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1277		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1278			*notification = SCTP_NOTIFY_ASSOC_UP;
1279
1280			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1281			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1282			    (inp->sctp_socket->so_qlimit == 0)) {
1283				stcb->sctp_ep->sctp_flags |=
1284				    SCTP_PCB_FLAGS_CONNECTED;
1285				soisconnected(stcb->sctp_ep->sctp_socket);
1286			}
1287			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1288				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1289			else
1290				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1291			SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1292			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1293		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1294			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1295		} else {
1296			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1297		}
1298		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1299			asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1300			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1301			    stcb->sctp_ep, stcb, asoc->primary_destination);
1302
1303		} else {
1304			asoc->state = SCTP_STATE_OPEN;
1305		}
1306		sctp_stop_all_cookie_timers(stcb);
1307		sctp_toss_old_cookies(stcb, asoc);
1308		sctp_send_cookie_ack(stcb);
1309		if (spec_flag) {
1310			/*
1311			 * only if we have retrans set do we do this. What
1312			 * this call does is get only the COOKIE-ACK out and
1313			 * then when we return the normal call to
1314			 * sctp_chunk_output will get the retrans out behind
1315			 * this.
1316			 */
1317			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK);
1318		}
1319		if (how_indx < sizeof(asoc->cookie_how))
1320			asoc->cookie_how[how_indx] = 11;
1321
1322		return (stcb);
1323	}
1324	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1325	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1326	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1327	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1328	    cookie->tie_tag_peer_vtag != 0) {
1329		struct sctpasochead *head;
1330
1331		/*
1332		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1333		 */
1334		/* temp code */
1335		if (how_indx < sizeof(asoc->cookie_how))
1336			asoc->cookie_how[how_indx] = 12;
1337		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1338		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1339
1340		*sac_assoc_id = sctp_get_associd(stcb);
1341		/* notify upper layer */
1342		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1343		atomic_add_int(&stcb->asoc.refcnt, 1);
1344		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1345		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1346		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1347			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1348		}
1349		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1350			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1351		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1352			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1353		}
1354		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1355			asoc->state = SCTP_STATE_OPEN |
1356			    SCTP_STATE_SHUTDOWN_PENDING;
1357			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1358			    stcb->sctp_ep, stcb, asoc->primary_destination);
1359
1360		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1361			/* move to OPEN state, if not in SHUTDOWN_SENT */
1362			asoc->state = SCTP_STATE_OPEN;
1363		}
1364		asoc->pre_open_streams =
1365		    ntohs(initack_cp->init.num_outbound_streams);
1366		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1367		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1368
1369		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1370		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1371
1372		asoc->str_reset_seq_in = asoc->init_seq_number;
1373
1374		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1375		if (asoc->mapping_array) {
1376			memset(asoc->mapping_array, 0,
1377			    asoc->mapping_array_size);
1378		}
1379		SCTP_TCB_UNLOCK(stcb);
1380		SCTP_INP_INFO_WLOCK();
1381		SCTP_INP_WLOCK(stcb->sctp_ep);
1382		SCTP_TCB_LOCK(stcb);
1383		atomic_add_int(&stcb->asoc.refcnt, -1);
1384		/* send up all the data */
1385		SCTP_TCB_SEND_LOCK(stcb);
1386
1387		sctp_report_all_outbound(stcb, 1);
1388		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1389			stcb->asoc.strmout[i].stream_no = i;
1390			stcb->asoc.strmout[i].next_sequence_sent = 0;
1391			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1392		}
1393		/* process the INIT-ACK info (my info) */
1394		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1395		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1396
1397		/* pull from vtag hash */
1398		LIST_REMOVE(stcb, sctp_asocs);
1399		/* re-insert to new vtag position */
1400		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1401		    sctppcbinfo.hashasocmark)];
1402		/*
1403		 * put it in the bucket in the vtag hash of assoc's for the
1404		 * system
1405		 */
1406		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1407
1408		/* Is this the first restart? */
1409		if (stcb->asoc.in_restart_hash == 0) {
1410			/* Ok add it to assoc_id vtag hash */
1411			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
1412			    sctppcbinfo.hashrestartmark)];
1413			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
1414			stcb->asoc.in_restart_hash = 1;
1415		}
1416		/* process the INIT info (peer's info) */
1417		SCTP_TCB_SEND_UNLOCK(stcb);
1418		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1419		SCTP_INP_INFO_WUNLOCK();
1420
1421		retval = sctp_process_init(init_cp, stcb, net);
1422		if (retval < 0) {
1423			if (how_indx < sizeof(asoc->cookie_how))
1424				asoc->cookie_how[how_indx] = 13;
1425
1426			return (NULL);
1427		}
1428		/*
1429		 * since we did not send a HB make sure we don't double
1430		 * things
1431		 */
1432		net->hb_responded = 1;
1433
1434		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1435		    init_offset + sizeof(struct sctp_init_chunk),
1436		    initack_offset, sh, init_src)) {
1437			if (how_indx < sizeof(asoc->cookie_how))
1438				asoc->cookie_how[how_indx] = 14;
1439
1440			return (NULL);
1441		}
1442		/* respond with a COOKIE-ACK */
1443		sctp_stop_all_cookie_timers(stcb);
1444		sctp_toss_old_cookies(stcb, asoc);
1445		sctp_send_cookie_ack(stcb);
1446		if (how_indx < sizeof(asoc->cookie_how))
1447			asoc->cookie_how[how_indx] = 15;
1448
1449		return (stcb);
1450	}
1451	if (how_indx < sizeof(asoc->cookie_how))
1452		asoc->cookie_how[how_indx] = 16;
1453	/* all other cases... */
1454	return (NULL);
1455}
1456
1457
1458/*
1459 * handle a state cookie for a new association m: input packet mbuf chain--
1460 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1461 * and the cookie signature does not exist offset: offset into mbuf to the
1462 * cookie-echo chunk length: length of the cookie chunk to: where the init
1463 * was from returns a new TCB
1464 */
1465static struct sctp_tcb *
1466sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1467    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1468    struct sctp_inpcb *inp, struct sctp_nets **netp,
1469    struct sockaddr *init_src, int *notification,
1470    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1471    uint32_t vrf_id)
1472{
1473	struct sctp_tcb *stcb;
1474	struct sctp_init_chunk *init_cp, init_buf;
1475	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1476	struct sockaddr_storage sa_store;
1477	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1478	struct sockaddr_in *sin;
1479	struct sockaddr_in6 *sin6;
1480	struct sctp_association *asoc;
1481	int chk_length;
1482	int init_offset, initack_offset, initack_limit;
1483	int retval;
1484	int error = 0;
1485	uint32_t old_tag;
1486	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1487
1488	/*
1489	 * find and validate the INIT chunk in the cookie (peer's info) the
1490	 * INIT should start after the cookie-echo header struct (chunk
1491	 * header, state cookie header struct)
1492	 */
1493	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1494	init_cp = (struct sctp_init_chunk *)
1495	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1496	    (uint8_t *) & init_buf);
1497	if (init_cp == NULL) {
1498		/* could not pull a INIT chunk in cookie */
1499		SCTPDBG(SCTP_DEBUG_INPUT1,
1500		    "process_cookie_new: could not pull INIT chunk hdr\n");
1501		return (NULL);
1502	}
1503	chk_length = ntohs(init_cp->ch.chunk_length);
1504	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1505		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1506		return (NULL);
1507	}
1508	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1509	/*
1510	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1511	 * INIT-ACK follows the INIT chunk
1512	 */
1513	initack_cp = (struct sctp_init_ack_chunk *)
1514	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1515	    (uint8_t *) & initack_buf);
1516	if (initack_cp == NULL) {
1517		/* could not pull INIT-ACK chunk in cookie */
1518		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1519		return (NULL);
1520	}
1521	chk_length = ntohs(initack_cp->ch.chunk_length);
1522	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1523		return (NULL);
1524	}
1525	/*
1526	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1527	 * "initack_limit" value.  This is because the chk_length field
1528	 * includes the length of the cookie, but the cookie is omitted when
1529	 * the INIT and INIT_ACK are tacked onto the cookie...
1530	 */
1531	initack_limit = offset + cookie_len;
1532
1533	/*
1534	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
1535	 * and popluate
1536	 */
1537	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1538	    ntohl(initack_cp->init.initiate_tag), vrf_id);
1539	if (stcb == NULL) {
1540		struct mbuf *op_err;
1541
1542		/* memory problem? */
1543		SCTPDBG(SCTP_DEBUG_INPUT1,
1544		    "process_cookie_new: no room for another TCB!\n");
1545		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1546
1547		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1548		    sh, op_err, vrf_id);
1549		return (NULL);
1550	}
1551	/* get the correct sctp_nets */
1552	if (netp)
1553		*netp = sctp_findnet(stcb, init_src);
1554
1555	asoc = &stcb->asoc;
1556	/* get scope variables out of cookie */
1557	asoc->ipv4_local_scope = cookie->ipv4_scope;
1558	asoc->site_scope = cookie->site_scope;
1559	asoc->local_scope = cookie->local_scope;
1560	asoc->loopback_scope = cookie->loopback_scope;
1561
1562	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1563	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1564		struct mbuf *op_err;
1565
1566		/*
1567		 * Houston we have a problem. The EP changed while the
1568		 * cookie was in flight. Only recourse is to abort the
1569		 * association.
1570		 */
1571		atomic_add_int(&stcb->asoc.refcnt, 1);
1572		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1573		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1574		    sh, op_err, vrf_id);
1575		atomic_add_int(&stcb->asoc.refcnt, -1);
1576		return (NULL);
1577	}
1578	/* process the INIT-ACK info (my info) */
1579	old_tag = asoc->my_vtag;
1580	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1581	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1582	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1583	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1584	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1585	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1586	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1587	asoc->str_reset_seq_in = asoc->init_seq_number;
1588
1589	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1590
1591	/* process the INIT info (peer's info) */
1592	if (netp)
1593		retval = sctp_process_init(init_cp, stcb, *netp);
1594	else
1595		retval = 0;
1596	if (retval < 0) {
1597		atomic_add_int(&stcb->asoc.refcnt, 1);
1598		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1599		atomic_add_int(&stcb->asoc.refcnt, -1);
1600		return (NULL);
1601	}
1602	/* load all addresses */
1603	if (sctp_load_addresses_from_init(stcb, m, iphlen,
1604	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
1605	    init_src)) {
1606		atomic_add_int(&stcb->asoc.refcnt, 1);
1607		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1608		atomic_add_int(&stcb->asoc.refcnt, -1);
1609		return (NULL);
1610	}
1611	/*
1612	 * verify any preceding AUTH chunk that was skipped
1613	 */
1614	/* pull the local authentication parameters from the cookie/init-ack */
1615	sctp_auth_get_cookie_params(stcb, m,
1616	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1617	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
1618	if (auth_skipped) {
1619		struct sctp_auth_chunk *auth;
1620
1621		auth = (struct sctp_auth_chunk *)
1622		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1623		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1624			/* auth HMAC failed, dump the assoc and packet */
1625			SCTPDBG(SCTP_DEBUG_AUTH1,
1626			    "COOKIE-ECHO: AUTH failed\n");
1627			sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1628			return (NULL);
1629		} else {
1630			/* remaining chunks checked... good to go */
1631			stcb->asoc.authenticated = 1;
1632		}
1633	}
1634	/* update current state */
1635	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1636	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1637		asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1638		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1639		    stcb->sctp_ep, stcb, asoc->primary_destination);
1640	} else {
1641		asoc->state = SCTP_STATE_OPEN;
1642	}
1643	sctp_stop_all_cookie_timers(stcb);
1644	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1645	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1646
1647	/*
1648	 * if we're doing ASCONFs, check to see if we have any new local
1649	 * addresses that need to get added to the peer (eg. addresses
1650	 * changed while cookie echo in flight).  This needs to be done
1651	 * after we go to the OPEN state to do the correct asconf
1652	 * processing. else, make sure we have the correct addresses in our
1653	 * lists
1654	 */
1655
1656	/* warning, we re-use sin, sin6, sa_store here! */
1657	/* pull in local_address (our "from" address) */
1658	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1659		/* source addr is IPv4 */
1660		sin = (struct sockaddr_in *)initack_src;
1661		memset(sin, 0, sizeof(*sin));
1662		sin->sin_family = AF_INET;
1663		sin->sin_len = sizeof(struct sockaddr_in);
1664		sin->sin_addr.s_addr = cookie->laddress[0];
1665	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1666		/* source addr is IPv6 */
1667		sin6 = (struct sockaddr_in6 *)initack_src;
1668		memset(sin6, 0, sizeof(*sin6));
1669		sin6->sin6_family = AF_INET6;
1670		sin6->sin6_len = sizeof(struct sockaddr_in6);
1671		sin6->sin6_scope_id = cookie->scope_id;
1672		memcpy(&sin6->sin6_addr, cookie->laddress,
1673		    sizeof(sin6->sin6_addr));
1674	} else {
1675		atomic_add_int(&stcb->asoc.refcnt, 1);
1676		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
1677		atomic_add_int(&stcb->asoc.refcnt, -1);
1678		return (NULL);
1679	}
1680
1681	sctp_check_address_list(stcb, m,
1682	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1683	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
1684	    initack_src, cookie->local_scope, cookie->site_scope,
1685	    cookie->ipv4_scope, cookie->loopback_scope);
1686
1687
1688	/* set up to notify upper layer */
1689	*notification = SCTP_NOTIFY_ASSOC_UP;
1690	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1691	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1692	    (inp->sctp_socket->so_qlimit == 0)) {
1693		/*
1694		 * This is an endpoint that called connect() how it got a
1695		 * cookie that is NEW is a bit of a mystery. It must be that
1696		 * the INIT was sent, but before it got there.. a complete
1697		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
1698		 * should have went to the other code.. not here.. oh well..
1699		 * a bit of protection is worth having..
1700		 */
1701		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1702		soisconnected(stcb->sctp_ep->sctp_socket);
1703	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1704	    (inp->sctp_socket->so_qlimit)) {
1705		/*
1706		 * We don't want to do anything with this one. Since it is
1707		 * the listening guy. The timer will get started for
1708		 * accepted connections in the caller.
1709		 */
1710		;
1711	}
1712	/* since we did not send a HB make sure we don't double things */
1713	if ((netp) && (*netp))
1714		(*netp)->hb_responded = 1;
1715
1716	if (stcb->asoc.sctp_autoclose_ticks &&
1717	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1718		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1719	}
1720	/* respond with a COOKIE-ACK */
1721	/* calculate the RTT */
1722	if ((netp) && (*netp)) {
1723		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
1724		    &cookie->time_entered);
1725	}
1726	sctp_send_cookie_ack(stcb);
1727	return (stcb);
1728}
1729
1730
1731/*
1732 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
1733 * existing (non-NULL) TCB
1734 */
1735static struct mbuf *
1736sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
1737    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
1738    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
1739    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1740    struct sctp_tcb **locked_tcb, uint32_t vrf_id)
1741{
1742	struct sctp_state_cookie *cookie;
1743	struct sockaddr_in6 sin6;
1744	struct sockaddr_in sin;
1745	struct sctp_tcb *l_stcb = *stcb;
1746	struct sctp_inpcb *l_inp;
1747	struct sockaddr *to;
1748	sctp_assoc_t sac_restart_id;
1749	struct sctp_pcb *ep;
1750	struct mbuf *m_sig;
1751	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
1752	uint8_t *sig;
1753	uint8_t cookie_ok = 0;
1754	unsigned int size_of_pkt, sig_offset, cookie_offset;
1755	unsigned int cookie_len;
1756	struct timeval now;
1757	struct timeval time_expires;
1758	struct sockaddr_storage dest_store;
1759	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
1760	struct ip *iph;
1761	int notification = 0;
1762	struct sctp_nets *netl;
1763	int had_a_existing_tcb = 0;
1764
1765	SCTPDBG(SCTP_DEBUG_INPUT2,
1766	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
1767
1768	if (inp_p == NULL) {
1769		return (NULL);
1770	}
1771	/* First get the destination address setup too. */
1772	iph = mtod(m, struct ip *);
1773	if (iph->ip_v == IPVERSION) {
1774		/* its IPv4 */
1775		struct sockaddr_in *lsin;
1776
1777		lsin = (struct sockaddr_in *)(localep_sa);
1778		memset(lsin, 0, sizeof(*lsin));
1779		lsin->sin_family = AF_INET;
1780		lsin->sin_len = sizeof(*lsin);
1781		lsin->sin_port = sh->dest_port;
1782		lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
1783		size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
1784	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1785		/* its IPv6 */
1786		struct ip6_hdr *ip6;
1787		struct sockaddr_in6 *lsin6;
1788
1789		lsin6 = (struct sockaddr_in6 *)(localep_sa);
1790		memset(lsin6, 0, sizeof(*lsin6));
1791		lsin6->sin6_family = AF_INET6;
1792		lsin6->sin6_len = sizeof(struct sockaddr_in6);
1793		ip6 = mtod(m, struct ip6_hdr *);
1794		lsin6->sin6_port = sh->dest_port;
1795		lsin6->sin6_addr = ip6->ip6_dst;
1796		size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
1797	} else {
1798		return (NULL);
1799	}
1800
1801	cookie = &cp->cookie;
1802	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
1803	cookie_len = ntohs(cp->ch.chunk_length);
1804
1805	if ((cookie->peerport != sh->src_port) &&
1806	    (cookie->myport != sh->dest_port) &&
1807	    (cookie->my_vtag != sh->v_tag)) {
1808		/*
1809		 * invalid ports or bad tag.  Note that we always leave the
1810		 * v_tag in the header in network order and when we stored
1811		 * it in the my_vtag slot we also left it in network order.
1812		 * This maintains the match even though it may be in the
1813		 * opposite byte order of the machine :->
1814		 */
1815		return (NULL);
1816	}
1817	if (cookie_len > size_of_pkt ||
1818	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
1819	    sizeof(struct sctp_init_chunk) +
1820	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
1821		/* cookie too long!  or too small */
1822		return (NULL);
1823	}
1824	/*
1825	 * split off the signature into its own mbuf (since it should not be
1826	 * calculated in the sctp_hmac_m() call).
1827	 */
1828	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
1829	if (sig_offset > size_of_pkt) {
1830		/* packet not correct size! */
1831		/* XXX this may already be accounted for earlier... */
1832		return (NULL);
1833	}
1834	m_sig = m_split(m, sig_offset, M_DONTWAIT);
1835	if (m_sig == NULL) {
1836		/* out of memory or ?? */
1837		return (NULL);
1838	}
1839	/*
1840	 * compute the signature/digest for the cookie
1841	 */
1842	ep = &(*inp_p)->sctp_ep;
1843	l_inp = *inp_p;
1844	if (l_stcb) {
1845		SCTP_TCB_UNLOCK(l_stcb);
1846	}
1847	SCTP_INP_RLOCK(l_inp);
1848	if (l_stcb) {
1849		SCTP_TCB_LOCK(l_stcb);
1850	}
1851	/* which cookie is it? */
1852	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
1853	    (ep->current_secret_number != ep->last_secret_number)) {
1854		/* it's the old cookie */
1855		(void)sctp_hmac_m(SCTP_HMAC,
1856		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1857		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1858	} else {
1859		/* it's the current cookie */
1860		(void)sctp_hmac_m(SCTP_HMAC,
1861		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
1862		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1863	}
1864	/* get the signature */
1865	SCTP_INP_RUNLOCK(l_inp);
1866	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
1867	if (sig == NULL) {
1868		/* couldn't find signature */
1869		sctp_m_freem(m_sig);
1870		return (NULL);
1871	}
1872	/* compare the received digest with the computed digest */
1873	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
1874		/* try the old cookie? */
1875		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
1876		    (ep->current_secret_number != ep->last_secret_number)) {
1877			/* compute digest with old */
1878			(void)sctp_hmac_m(SCTP_HMAC,
1879			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1880			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1881			/* compare */
1882			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
1883				cookie_ok = 1;
1884		}
1885	} else {
1886		cookie_ok = 1;
1887	}
1888
1889	/*
1890	 * Now before we continue we must reconstruct our mbuf so that
1891	 * normal processing of any other chunks will work.
1892	 */
1893	{
1894		struct mbuf *m_at;
1895
1896		m_at = m;
1897		while (SCTP_BUF_NEXT(m_at) != NULL) {
1898			m_at = SCTP_BUF_NEXT(m_at);
1899		}
1900		SCTP_BUF_NEXT(m_at) = m_sig;
1901	}
1902
1903	if (cookie_ok == 0) {
1904		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
1905		SCTPDBG(SCTP_DEBUG_INPUT2,
1906		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
1907		    (uint32_t) offset, cookie_offset, sig_offset);
1908		return (NULL);
1909	}
1910	/*
1911	 * check the cookie timestamps to be sure it's not stale
1912	 */
1913	(void)SCTP_GETTIME_TIMEVAL(&now);
1914	/* Expire time is in Ticks, so we convert to seconds */
1915	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
1916	time_expires.tv_usec = cookie->time_entered.tv_usec;
1917	if (timevalcmp(&now, &time_expires, >)) {
1918		/* cookie is stale! */
1919		struct mbuf *op_err;
1920		struct sctp_stale_cookie_msg *scm;
1921		uint32_t tim;
1922
1923		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
1924		    0, M_DONTWAIT, 1, MT_DATA);
1925		if (op_err == NULL) {
1926			/* FOOBAR */
1927			return (NULL);
1928		}
1929		/* pre-reserve some space */
1930		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1931		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1932		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1933
1934		/* Set the len */
1935		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
1936		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
1937		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
1938		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
1939		    (sizeof(uint32_t))));
1940		/* seconds to usec */
1941		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
1942		/* add in usec */
1943		if (tim == 0)
1944			tim = now.tv_usec - cookie->time_entered.tv_usec;
1945		scm->time_usec = htonl(tim);
1946		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1947		    vrf_id);
1948		return (NULL);
1949	}
1950	/*
1951	 * Now we must see with the lookup address if we have an existing
1952	 * asoc. This will only happen if we were in the COOKIE-WAIT state
1953	 * and a INIT collided with us and somewhere the peer sent the
1954	 * cookie on another address besides the single address our assoc
1955	 * had for him. In this case we will have one of the tie-tags set at
1956	 * least AND the address field in the cookie can be used to look it
1957	 * up.
1958	 */
1959	to = NULL;
1960	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
1961		memset(&sin6, 0, sizeof(sin6));
1962		sin6.sin6_family = AF_INET6;
1963		sin6.sin6_len = sizeof(sin6);
1964		sin6.sin6_port = sh->src_port;
1965		sin6.sin6_scope_id = cookie->scope_id;
1966		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
1967		    sizeof(sin6.sin6_addr.s6_addr));
1968		to = (struct sockaddr *)&sin6;
1969	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
1970		memset(&sin, 0, sizeof(sin));
1971		sin.sin_family = AF_INET;
1972		sin.sin_len = sizeof(sin);
1973		sin.sin_port = sh->src_port;
1974		sin.sin_addr.s_addr = cookie->address[0];
1975		to = (struct sockaddr *)&sin;
1976	} else {
1977		/* This should not happen */
1978		return (NULL);
1979	}
1980	if ((*stcb == NULL) && to) {
1981		/* Yep, lets check */
1982		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
1983		if (*stcb == NULL) {
1984			/*
1985			 * We should have only got back the same inp. If we
1986			 * got back a different ep we have a problem. The
1987			 * original findep got back l_inp and now
1988			 */
1989			if (l_inp != *inp_p) {
1990				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
1991			}
1992		} else {
1993			if (*locked_tcb == NULL) {
1994				/*
1995				 * In this case we found the assoc only
1996				 * after we locked the create lock. This
1997				 * means we are in a colliding case and we
1998				 * must make sure that we unlock the tcb if
1999				 * its one of the cases where we throw away
2000				 * the incoming packets.
2001				 */
2002				*locked_tcb = *stcb;
2003
2004				/*
2005				 * We must also increment the inp ref count
2006				 * since the ref_count flags was set when we
2007				 * did not find the TCB, now we found it
2008				 * which reduces the refcount.. we must
2009				 * raise it back out to balance it all :-)
2010				 */
2011				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2012				if ((*stcb)->sctp_ep != l_inp) {
2013					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2014					    (*stcb)->sctp_ep, l_inp);
2015				}
2016			}
2017		}
2018	}
2019	if (to == NULL)
2020		return (NULL);
2021
2022	cookie_len -= SCTP_SIGNATURE_SIZE;
2023	if (*stcb == NULL) {
2024		/* this is the "normal" case... get a new TCB */
2025		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2026		    cookie_len, *inp_p, netp, to, &notification,
2027		    auth_skipped, auth_offset, auth_len, vrf_id);
2028	} else {
2029		/* this is abnormal... cookie-echo on existing TCB */
2030		had_a_existing_tcb = 1;
2031		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2032		    cookie, cookie_len, *inp_p, *stcb, *netp, to,
2033		    &notification, &sac_restart_id, vrf_id);
2034	}
2035
2036	if (*stcb == NULL) {
2037		/* still no TCB... must be bad cookie-echo */
2038		return (NULL);
2039	}
2040	/*
2041	 * Ok, we built an association so confirm the address we sent the
2042	 * INIT-ACK to.
2043	 */
2044	netl = sctp_findnet(*stcb, to);
2045	/*
2046	 * This code should in theory NOT run but
2047	 */
2048	if (netl == NULL) {
2049		/* TSNH! Huh, why do I need to add this address here? */
2050		int ret;
2051
2052		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2053		    SCTP_IN_COOKIE_PROC);
2054		netl = sctp_findnet(*stcb, to);
2055	}
2056	if (netl) {
2057		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2058			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2059			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2060			    netl);
2061			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2062			    (*stcb), 0, (void *)netl);
2063		}
2064	}
2065	if (*stcb) {
2066		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2067		    *stcb, NULL);
2068	}
2069	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2070		if (!had_a_existing_tcb ||
2071		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2072			/*
2073			 * If we have a NEW cookie or the connect never
2074			 * reached the connected state during collision we
2075			 * must do the TCP accept thing.
2076			 */
2077			struct socket *so, *oso;
2078			struct sctp_inpcb *inp;
2079
2080			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2081				/*
2082				 * For a restart we will keep the same
2083				 * socket, no need to do anything. I THINK!!
2084				 */
2085				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id);
2086				return (m);
2087			}
2088			oso = (*inp_p)->sctp_socket;
2089			/*
2090			 * We do this to keep the sockets side happy durin
2091			 * the sonewcon ONLY.
2092			 */
2093			NET_LOCK_GIANT();
2094			SCTP_TCB_UNLOCK((*stcb));
2095			so = sonewconn(oso, 0
2096			    );
2097			NET_UNLOCK_GIANT();
2098			SCTP_INP_WLOCK((*stcb)->sctp_ep);
2099			SCTP_TCB_LOCK((*stcb));
2100			SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
2101			if (so == NULL) {
2102				struct mbuf *op_err;
2103
2104				/* Too many sockets */
2105				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2106				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2107				sctp_abort_association(*inp_p, NULL, m, iphlen,
2108				    sh, op_err, vrf_id);
2109				sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2110				return (NULL);
2111			}
2112			inp = (struct sctp_inpcb *)so->so_pcb;
2113			SCTP_INP_INCR_REF(inp);
2114			/*
2115			 * We add the unbound flag here so that if we get an
2116			 * soabort() before we get the move_pcb done, we
2117			 * will properly cleanup.
2118			 */
2119			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2120			    SCTP_PCB_FLAGS_CONNECTED |
2121			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2122			    SCTP_PCB_FLAGS_UNBOUND |
2123			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2124			    SCTP_PCB_FLAGS_DONT_WAKE);
2125			inp->sctp_features = (*inp_p)->sctp_features;
2126			inp->sctp_socket = so;
2127			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2128			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2129			inp->sctp_context = (*inp_p)->sctp_context;
2130			inp->inp_starting_point_for_iterator = NULL;
2131			/*
2132			 * copy in the authentication parameters from the
2133			 * original endpoint
2134			 */
2135			if (inp->sctp_ep.local_hmacs)
2136				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2137			inp->sctp_ep.local_hmacs =
2138			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2139			if (inp->sctp_ep.local_auth_chunks)
2140				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2141			inp->sctp_ep.local_auth_chunks =
2142			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2143			(void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2144			    &inp->sctp_ep.shared_keys);
2145
2146			/*
2147			 * Now we must move it from one hash table to
2148			 * another and get the tcb in the right place.
2149			 */
2150			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2151
2152			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2153			SCTP_TCB_UNLOCK((*stcb));
2154
2155			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2156			SCTP_TCB_LOCK((*stcb));
2157			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2158
2159
2160			/*
2161			 * now we must check to see if we were aborted while
2162			 * the move was going on and the lock/unlock
2163			 * happened.
2164			 */
2165			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2166				/*
2167				 * yep it was, we leave the assoc attached
2168				 * to the socket since the sctp_inpcb_free()
2169				 * call will send an abort for us.
2170				 */
2171				SCTP_INP_DECR_REF(inp);
2172				return (NULL);
2173			}
2174			SCTP_INP_DECR_REF(inp);
2175			/* Switch over to the new guy */
2176			*inp_p = inp;
2177			sctp_ulp_notify(notification, *stcb, 0, NULL);
2178
2179			/*
2180			 * Pull it from the incomplete queue and wake the
2181			 * guy
2182			 */
2183			soisconnected(so);
2184			return (m);
2185		}
2186	}
2187	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2188		sctp_ulp_notify(notification, *stcb, 0, NULL);
2189	}
2190	return (m);
2191}
2192
2193static void
2194sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2195    struct sctp_tcb *stcb, struct sctp_nets *net)
2196{
2197	/* cp must not be used, others call this without a c-ack :-) */
2198	struct sctp_association *asoc;
2199
2200	SCTPDBG(SCTP_DEBUG_INPUT2,
2201	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2202	if (stcb == NULL)
2203		return;
2204
2205	asoc = &stcb->asoc;
2206
2207	sctp_stop_all_cookie_timers(stcb);
2208	/* process according to association state */
2209	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2210		/* state change only needed when I am in right state */
2211		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2212		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2213			asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
2214			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2215			    stcb->sctp_ep, stcb, asoc->primary_destination);
2216
2217		} else {
2218			asoc->state = SCTP_STATE_OPEN;
2219		}
2220		/* update RTO */
2221		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2222		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2223		if (asoc->overall_error_count == 0) {
2224			net->RTO = sctp_calculate_rto(stcb, asoc, net,
2225			    &asoc->time_entered);
2226		}
2227		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2228		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
2229		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2230		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2231			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2232			soisconnected(stcb->sctp_ep->sctp_socket);
2233		}
2234		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2235		    stcb, net);
2236		/*
2237		 * since we did not send a HB make sure we don't double
2238		 * things
2239		 */
2240		net->hb_responded = 1;
2241
2242		if (stcb->asoc.sctp_autoclose_ticks &&
2243		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2244			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2245			    stcb->sctp_ep, stcb, NULL);
2246		}
2247		/*
2248		 * set ASCONF timer if ASCONFs are pending and allowed (eg.
2249		 * addresses changed when init/cookie echo in flight)
2250		 */
2251		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2252		    (stcb->asoc.peer_supports_asconf) &&
2253		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2254			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2255			    stcb->sctp_ep, stcb,
2256			    stcb->asoc.primary_destination);
2257		}
2258	}
2259	/* Toss the cookie if I can */
2260	sctp_toss_old_cookies(stcb, asoc);
2261	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2262		/* Restart the timer if we have pending data */
2263		struct sctp_tmit_chunk *chk;
2264
2265		chk = TAILQ_FIRST(&asoc->sent_queue);
2266		if (chk) {
2267			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2268			    stcb, chk->whoTo);
2269		}
2270	}
2271}
2272
2273static void
2274sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2275    struct sctp_tcb *stcb)
2276{
2277	struct sctp_nets *net;
2278	struct sctp_tmit_chunk *lchk;
2279	uint32_t tsn;
2280
2281	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2282		return;
2283	}
2284	SCTP_STAT_INCR(sctps_recvecne);
2285	tsn = ntohl(cp->tsn);
2286	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2287	/* Also we make sure we disable the nonce_wait */
2288	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2289	if (lchk == NULL) {
2290		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2291	} else {
2292		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2293	}
2294	stcb->asoc.nonce_wait_for_ecne = 0;
2295	stcb->asoc.nonce_sum_check = 0;
2296
2297	/* Find where it was sent, if possible */
2298	net = NULL;
2299	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2300	while (lchk) {
2301		if (lchk->rec.data.TSN_seq == tsn) {
2302			net = lchk->whoTo;
2303			break;
2304		}
2305		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2306			break;
2307		lchk = TAILQ_NEXT(lchk, sctp_next);
2308	}
2309	if (net == NULL)
2310		/* default is we use the primary */
2311		net = stcb->asoc.primary_destination;
2312
2313	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2314		int old_cwnd;
2315
2316		old_cwnd = net->cwnd;
2317		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
2318		net->ssthresh = net->cwnd / 2;
2319		if (net->ssthresh < net->mtu) {
2320			net->ssthresh = net->mtu;
2321			/* here back off the timer as well, to slow us down */
2322			net->RTO <<= 1;
2323		}
2324		net->cwnd = net->ssthresh;
2325		if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
2326			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
2327		}
2328		/*
2329		 * we reduce once every RTT. So we will only lower cwnd at
2330		 * the next sending seq i.e. the resync_tsn.
2331		 */
2332		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2333	}
2334	/*
2335	 * We always send a CWR this way if our previous one was lost our
2336	 * peer will get an update, or if it is not time again to reduce we
2337	 * still get the cwr to the peer.
2338	 */
2339	sctp_send_cwr(stcb, net, tsn);
2340}
2341
2342static void
2343sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2344{
2345	/*
2346	 * Here we get a CWR from the peer. We must look in the outqueue and
2347	 * make sure that we have a covered ECNE in teh control chunk part.
2348	 * If so remove it.
2349	 */
2350	struct sctp_tmit_chunk *chk;
2351	struct sctp_ecne_chunk *ecne;
2352
2353	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2354		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2355			continue;
2356		}
2357		/*
2358		 * Look for and remove if it is the right TSN. Since there
2359		 * is only ONE ECNE on the control queue at any one time we
2360		 * don't need to worry about more than one!
2361		 */
2362		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2363		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2364		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2365			/* this covers this ECNE, we can remove it */
2366			stcb->asoc.ecn_echo_cnt_onq--;
2367			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2368			    sctp_next);
2369			if (chk->data) {
2370				sctp_m_freem(chk->data);
2371				chk->data = NULL;
2372			}
2373			stcb->asoc.ctrl_queue_cnt--;
2374			sctp_free_remote_addr(chk->whoTo);
2375			sctp_free_a_chunk(stcb, chk);
2376			break;
2377		}
2378	}
2379}
2380
2381static void
2382sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2383    struct sctp_tcb *stcb, struct sctp_nets *net)
2384{
2385	struct sctp_association *asoc;
2386
2387	SCTPDBG(SCTP_DEBUG_INPUT2,
2388	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2389	if (stcb == NULL)
2390		return;
2391
2392	asoc = &stcb->asoc;
2393	/* process according to association state */
2394	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2395		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
2396		SCTP_TCB_UNLOCK(stcb);
2397		return;
2398	}
2399	/* notify upper layer protocol */
2400	if (stcb->sctp_socket) {
2401		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
2402		/* are the queues empty? they should be */
2403		if (!TAILQ_EMPTY(&asoc->send_queue) ||
2404		    !TAILQ_EMPTY(&asoc->sent_queue) ||
2405		    !TAILQ_EMPTY(&asoc->out_wheel)) {
2406			sctp_report_all_outbound(stcb, 0);
2407		}
2408	}
2409	/* stop the timer */
2410	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2411	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
2412	/* free the TCB */
2413	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2414	return;
2415}
2416
2417static int
2418process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2419    struct sctp_nets *net, uint8_t flg)
2420{
2421	switch (desc->chunk_type) {
2422		case SCTP_DATA:
2423		/* find the tsn to resend (possibly */
2424		{
2425			uint32_t tsn;
2426			struct sctp_tmit_chunk *tp1;
2427
2428			tsn = ntohl(desc->tsn_ifany);
2429			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2430			while (tp1) {
2431				if (tp1->rec.data.TSN_seq == tsn) {
2432					/* found it */
2433					break;
2434				}
2435				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2436				    MAX_TSN)) {
2437					/* not found */
2438					tp1 = NULL;
2439					break;
2440				}
2441				tp1 = TAILQ_NEXT(tp1, sctp_next);
2442			}
2443			if (tp1 == NULL) {
2444				/*
2445				 * Do it the other way , aka without paying
2446				 * attention to queue seq order.
2447				 */
2448				SCTP_STAT_INCR(sctps_pdrpdnfnd);
2449				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2450				while (tp1) {
2451					if (tp1->rec.data.TSN_seq == tsn) {
2452						/* found it */
2453						break;
2454					}
2455					tp1 = TAILQ_NEXT(tp1, sctp_next);
2456				}
2457			}
2458			if (tp1 == NULL) {
2459				SCTP_STAT_INCR(sctps_pdrptsnnf);
2460			}
2461			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
2462				uint8_t *ddp;
2463
2464				if ((stcb->asoc.peers_rwnd == 0) &&
2465				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2466					SCTP_STAT_INCR(sctps_pdrpdiwnp);
2467					return (0);
2468				}
2469				if (stcb->asoc.peers_rwnd == 0 &&
2470				    (flg & SCTP_FROM_MIDDLE_BOX)) {
2471					SCTP_STAT_INCR(sctps_pdrpdizrw);
2472					return (0);
2473				}
2474				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2475				    sizeof(struct sctp_data_chunk));
2476				{
2477					unsigned int iii;
2478
2479					for (iii = 0; iii < sizeof(desc->data_bytes);
2480					    iii++) {
2481						if (ddp[iii] != desc->data_bytes[iii]) {
2482							SCTP_STAT_INCR(sctps_pdrpbadd);
2483							return (-1);
2484						}
2485					}
2486				}
2487				/*
2488				 * We zero out the nonce so resync not
2489				 * needed
2490				 */
2491				tp1->rec.data.ect_nonce = 0;
2492
2493				if (tp1->do_rtt) {
2494					/*
2495					 * this guy had a RTO calculation
2496					 * pending on it, cancel it
2497					 */
2498					tp1->do_rtt = 0;
2499				}
2500				SCTP_STAT_INCR(sctps_pdrpmark);
2501				if (tp1->sent != SCTP_DATAGRAM_RESEND)
2502					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2503				tp1->sent = SCTP_DATAGRAM_RESEND;
2504				/*
2505				 * mark it as if we were doing a FR, since
2506				 * we will be getting gap ack reports behind
2507				 * the info from the router.
2508				 */
2509				tp1->rec.data.doing_fast_retransmit = 1;
2510				/*
2511				 * mark the tsn with what sequences can
2512				 * cause a new FR.
2513				 */
2514				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2515					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2516				} else {
2517					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2518				}
2519
2520				/* restart the timer */
2521				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2522				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2523				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2524				    stcb, tp1->whoTo);
2525
2526				/* fix counts and things */
2527				if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
2528					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2529					    tp1->whoTo->flight_size,
2530					    tp1->book_size,
2531					    (uintptr_t) stcb,
2532					    tp1->rec.data.TSN_seq);
2533				}
2534				sctp_flight_size_decrease(tp1);
2535				sctp_total_flight_decrease(stcb, tp1);
2536			} {
2537				/* audit code */
2538				unsigned int audit;
2539
2540				audit = 0;
2541				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2542					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2543						audit++;
2544				}
2545				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2546				    sctp_next) {
2547					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2548						audit++;
2549				}
2550				if (audit != stcb->asoc.sent_queue_retran_cnt) {
2551					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
2552					    audit, stcb->asoc.sent_queue_retran_cnt);
2553#ifndef SCTP_AUDITING_ENABLED
2554					stcb->asoc.sent_queue_retran_cnt = audit;
2555#endif
2556				}
2557			}
2558		}
2559		break;
2560	case SCTP_ASCONF:
2561		{
2562			struct sctp_tmit_chunk *asconf;
2563
2564			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
2565			    sctp_next) {
2566				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
2567					break;
2568				}
2569			}
2570			if (asconf) {
2571				if (asconf->sent != SCTP_DATAGRAM_RESEND)
2572					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2573				asconf->sent = SCTP_DATAGRAM_RESEND;
2574				asconf->snd_count--;
2575			}
2576		}
2577		break;
2578	case SCTP_INITIATION:
2579		/* resend the INIT */
2580		stcb->asoc.dropped_special_cnt++;
2581		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
2582			/*
2583			 * If we can get it in, in a few attempts we do
2584			 * this, otherwise we let the timer fire.
2585			 */
2586			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
2587			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2588			sctp_send_initiate(stcb->sctp_ep, stcb);
2589		}
2590		break;
2591	case SCTP_SELECTIVE_ACK:
2592		/* resend the sack */
2593		sctp_send_sack(stcb);
2594		break;
2595	case SCTP_HEARTBEAT_REQUEST:
2596		/* resend a demand HB */
2597		(void)sctp_send_hb(stcb, 1, net);
2598		break;
2599	case SCTP_SHUTDOWN:
2600		sctp_send_shutdown(stcb, net);
2601		break;
2602	case SCTP_SHUTDOWN_ACK:
2603		sctp_send_shutdown_ack(stcb, net);
2604		break;
2605	case SCTP_COOKIE_ECHO:
2606		{
2607			struct sctp_tmit_chunk *cookie;
2608
2609			cookie = NULL;
2610			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
2611			    sctp_next) {
2612				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
2613					break;
2614				}
2615			}
2616			if (cookie) {
2617				if (cookie->sent != SCTP_DATAGRAM_RESEND)
2618					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2619				cookie->sent = SCTP_DATAGRAM_RESEND;
2620				sctp_stop_all_cookie_timers(stcb);
2621			}
2622		}
2623		break;
2624	case SCTP_COOKIE_ACK:
2625		sctp_send_cookie_ack(stcb);
2626		break;
2627	case SCTP_ASCONF_ACK:
2628		/* resend last asconf ack */
2629		sctp_send_asconf_ack(stcb, 1);
2630		break;
2631	case SCTP_FORWARD_CUM_TSN:
2632		send_forward_tsn(stcb, &stcb->asoc);
2633		break;
2634		/* can't do anything with these */
2635	case SCTP_PACKET_DROPPED:
2636	case SCTP_INITIATION_ACK:	/* this should not happen */
2637	case SCTP_HEARTBEAT_ACK:
2638	case SCTP_ABORT_ASSOCIATION:
2639	case SCTP_OPERATION_ERROR:
2640	case SCTP_SHUTDOWN_COMPLETE:
2641	case SCTP_ECN_ECHO:
2642	case SCTP_ECN_CWR:
2643	default:
2644		break;
2645	}
2646	return (0);
2647}
2648
2649void
2650sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2651{
2652	int i;
2653	uint16_t temp;
2654
2655	/*
2656	 * We set things to 0xffff since this is the last delivered sequence
2657	 * and we will be sending in 0 after the reset.
2658	 */
2659
2660	if (number_entries) {
2661		for (i = 0; i < number_entries; i++) {
2662			temp = ntohs(list[i]);
2663			if (temp >= stcb->asoc.streamincnt) {
2664				continue;
2665			}
2666			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
2667		}
2668	} else {
2669		list = NULL;
2670		for (i = 0; i < stcb->asoc.streamincnt; i++) {
2671			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
2672		}
2673	}
2674	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
2675}
2676
2677static void
2678sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2679{
2680	int i;
2681
2682	if (number_entries == 0) {
2683		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2684			stcb->asoc.strmout[i].next_sequence_sent = 0;
2685		}
2686	} else if (number_entries) {
2687		for (i = 0; i < number_entries; i++) {
2688			uint16_t temp;
2689
2690			temp = ntohs(list[i]);
2691			if (temp >= stcb->asoc.streamoutcnt) {
2692				/* no such stream */
2693				continue;
2694			}
2695			stcb->asoc.strmout[temp].next_sequence_sent = 0;
2696		}
2697	}
2698	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
2699}
2700
2701
2702struct sctp_stream_reset_out_request *
2703sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
2704{
2705	struct sctp_association *asoc;
2706	struct sctp_stream_reset_out_req *req;
2707	struct sctp_stream_reset_out_request *r;
2708	struct sctp_tmit_chunk *chk;
2709	int len, clen;
2710
2711	asoc = &stcb->asoc;
2712	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
2713		asoc->stream_reset_outstanding = 0;
2714		return (NULL);
2715	}
2716	if (stcb->asoc.str_reset == NULL) {
2717		asoc->stream_reset_outstanding = 0;
2718		return (NULL);
2719	}
2720	chk = stcb->asoc.str_reset;
2721	if (chk->data == NULL) {
2722		return (NULL);
2723	}
2724	if (bchk) {
2725		/* he wants a copy of the chk pointer */
2726		*bchk = chk;
2727	}
2728	clen = chk->send_size;
2729	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
2730	r = &req->sr_req;
2731	if (ntohl(r->request_seq) == seq) {
2732		/* found it */
2733		return (r);
2734	}
2735	len = SCTP_SIZE32(ntohs(r->ph.param_length));
2736	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
2737		/* move to the next one, there can only be a max of two */
2738		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
2739		if (ntohl(r->request_seq) == seq) {
2740			return (r);
2741		}
2742	}
2743	/* that seq is not here */
2744	return (NULL);
2745}
2746
2747static void
2748sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
2749{
2750	struct sctp_association *asoc;
2751	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
2752
2753	if (stcb->asoc.str_reset == NULL) {
2754		return;
2755	}
2756	asoc = &stcb->asoc;
2757
2758	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
2759	TAILQ_REMOVE(&asoc->control_send_queue,
2760	    chk,
2761	    sctp_next);
2762	if (chk->data) {
2763		sctp_m_freem(chk->data);
2764		chk->data = NULL;
2765	}
2766	asoc->ctrl_queue_cnt--;
2767	sctp_free_remote_addr(chk->whoTo);
2768
2769	sctp_free_a_chunk(stcb, chk);
2770	stcb->asoc.str_reset = NULL;
2771}
2772
2773
2774static int
2775sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
2776    uint32_t seq, uint32_t action,
2777    struct sctp_stream_reset_response *respin)
2778{
2779	uint16_t type;
2780	int lparm_len;
2781	struct sctp_association *asoc = &stcb->asoc;
2782	struct sctp_tmit_chunk *chk;
2783	struct sctp_stream_reset_out_request *srparam;
2784	int number_entries;
2785
2786	if (asoc->stream_reset_outstanding == 0) {
2787		/* duplicate */
2788		return (0);
2789	}
2790	if (seq == stcb->asoc.str_reset_seq_out) {
2791		srparam = sctp_find_stream_reset(stcb, seq, &chk);
2792		if (srparam) {
2793			stcb->asoc.str_reset_seq_out++;
2794			type = ntohs(srparam->ph.param_type);
2795			lparm_len = ntohs(srparam->ph.param_length);
2796			if (type == SCTP_STR_RESET_OUT_REQUEST) {
2797				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
2798				asoc->stream_reset_out_is_outstanding = 0;
2799				if (asoc->stream_reset_outstanding)
2800					asoc->stream_reset_outstanding--;
2801				if (action == SCTP_STREAM_RESET_PERFORMED) {
2802					/* do it */
2803					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
2804				} else {
2805					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
2806				}
2807			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
2808				/* Answered my request */
2809				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
2810				if (asoc->stream_reset_outstanding)
2811					asoc->stream_reset_outstanding--;
2812				if (action != SCTP_STREAM_RESET_PERFORMED) {
2813					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
2814				}
2815			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
2816				/**
2817				 * a) Adopt the new in tsn.
2818				 * b) reset the map
2819				 * c) Adopt the new out-tsn
2820				 */
2821				struct sctp_stream_reset_response_tsn *resp;
2822				struct sctp_forward_tsn_chunk fwdtsn;
2823				int abort_flag = 0;
2824
2825				if (respin == NULL) {
2826					/* huh ? */
2827					return (0);
2828				}
2829				if (action == SCTP_STREAM_RESET_PERFORMED) {
2830					resp = (struct sctp_stream_reset_response_tsn *)respin;
2831					asoc->stream_reset_outstanding--;
2832					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2833					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2834					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
2835					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
2836					if (abort_flag) {
2837						return (1);
2838					}
2839					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
2840					stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2841					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
2842					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2843					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
2844					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
2845
2846					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2847					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2848
2849				}
2850			}
2851			/* get rid of the request and get the request flags */
2852			if (asoc->stream_reset_outstanding == 0) {
2853				sctp_clean_up_stream_reset(stcb);
2854			}
2855		}
2856	}
2857	return (0);
2858}
2859
2860static void
2861sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
2862    struct sctp_tmit_chunk *chk,
2863    struct sctp_stream_reset_in_request *req, int trunc)
2864{
2865	uint32_t seq;
2866	int len, i;
2867	int number_entries;
2868	uint16_t temp;
2869
2870	/*
2871	 * peer wants me to send a str-reset to him for my outgoing seq's if
2872	 * seq_in is right.
2873	 */
2874	struct sctp_association *asoc = &stcb->asoc;
2875
2876	seq = ntohl(req->request_seq);
2877	if (asoc->str_reset_seq_in == seq) {
2878		if (trunc) {
2879			/* Can't do it, since they exceeded our buffer size  */
2880			asoc->last_reset_action[1] = asoc->last_reset_action[0];
2881			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
2882			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2883		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
2884			len = ntohs(req->ph.param_length);
2885			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
2886			for (i = 0; i < number_entries; i++) {
2887				temp = ntohs(req->list_of_streams[i]);
2888				req->list_of_streams[i] = temp;
2889			}
2890			/* move the reset action back one */
2891			asoc->last_reset_action[1] = asoc->last_reset_action[0];
2892			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2893			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
2894			    asoc->str_reset_seq_out,
2895			    seq, (asoc->sending_seq - 1));
2896			asoc->stream_reset_out_is_outstanding = 1;
2897			asoc->str_reset = chk;
2898			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
2899			stcb->asoc.stream_reset_outstanding++;
2900		} else {
2901			/* Can't do it, since we have sent one out */
2902			asoc->last_reset_action[1] = asoc->last_reset_action[0];
2903			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
2904			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2905		}
2906		asoc->str_reset_seq_in++;
2907	} else if (asoc->str_reset_seq_in - 1 == seq) {
2908		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2909	} else if (asoc->str_reset_seq_in - 2 == seq) {
2910		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
2911	} else {
2912		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2913	}
2914}
2915
2916static int
2917sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
2918    struct sctp_tmit_chunk *chk,
2919    struct sctp_stream_reset_tsn_request *req)
2920{
2921	/* reset all in and out and update the tsn */
2922	/*
2923	 * A) reset my str-seq's on in and out. B) Select a receive next,
2924	 * and set cum-ack to it. Also process this selected number as a
2925	 * fwd-tsn as well. C) set in the response my next sending seq.
2926	 */
2927	struct sctp_forward_tsn_chunk fwdtsn;
2928	struct sctp_association *asoc = &stcb->asoc;
2929	int abort_flag = 0;
2930	uint32_t seq;
2931
2932	seq = ntohl(req->request_seq);
2933	if (asoc->str_reset_seq_in == seq) {
2934		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2935		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2936		fwdtsn.ch.chunk_flags = 0;
2937		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
2938		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
2939		if (abort_flag) {
2940			return (1);
2941		}
2942		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
2943		stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2944		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
2945		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2946		atomic_add_int(&stcb->asoc.sending_seq, 1);
2947		/* save off historical data for retrans */
2948		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
2949		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
2950		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
2951		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
2952
2953		sctp_add_stream_reset_result_tsn(chk,
2954		    ntohl(req->request_seq),
2955		    SCTP_STREAM_RESET_PERFORMED,
2956		    stcb->asoc.sending_seq,
2957		    stcb->asoc.mapping_array_base_tsn);
2958		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2959		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2960		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
2961		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2962
2963		asoc->str_reset_seq_in++;
2964	} else if (asoc->str_reset_seq_in - 1 == seq) {
2965		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
2966		    stcb->asoc.last_sending_seq[0],
2967		    stcb->asoc.last_base_tsnsent[0]
2968		    );
2969	} else if (asoc->str_reset_seq_in - 2 == seq) {
2970		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
2971		    stcb->asoc.last_sending_seq[1],
2972		    stcb->asoc.last_base_tsnsent[1]
2973		    );
2974	} else {
2975		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2976	}
2977	return (0);
2978}
2979
2980static void
2981sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
2982    struct sctp_tmit_chunk *chk,
2983    struct sctp_stream_reset_out_request *req, int trunc)
2984{
2985	uint32_t seq, tsn;
2986	int number_entries, len;
2987	struct sctp_association *asoc = &stcb->asoc;
2988
2989	seq = ntohl(req->request_seq);
2990
2991	/* now if its not a duplicate we process it */
2992	if (asoc->str_reset_seq_in == seq) {
2993		len = ntohs(req->ph.param_length);
2994		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
2995		/*
2996		 * the sender is resetting, handle the list issue.. we must
2997		 * a) verify if we can do the reset, if so no problem b) If
2998		 * we can't do the reset we must copy the request. c) queue
2999		 * it, and setup the data in processor to trigger it off
3000		 * when needed and dequeue all the queued data.
3001		 */
3002		tsn = ntohl(req->send_reset_at_tsn);
3003
3004		/* move the reset action back one */
3005		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3006		if (trunc) {
3007			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3008			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3009		} else if ((tsn == asoc->cumulative_tsn) ||
3010		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3011			/* we can do it now */
3012			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3013			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3014			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3015		} else {
3016			/*
3017			 * we must queue it up and thus wait for the TSN's
3018			 * to arrive that are at or before tsn
3019			 */
3020			struct sctp_stream_reset_list *liste;
3021			int siz;
3022
3023			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3024			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3025			    siz, SCTP_M_STRESET);
3026			if (liste == NULL) {
3027				/* gak out of memory */
3028				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3029				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3030				return;
3031			}
3032			liste->tsn = tsn;
3033			liste->number_entries = number_entries;
3034			memcpy(&liste->req, req,
3035			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3036			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3037			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3038			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3039		}
3040		asoc->str_reset_seq_in++;
3041	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3042		/*
3043		 * one seq back, just echo back last action since my
3044		 * response was lost.
3045		 */
3046		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3047	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3048		/*
3049		 * two seq back, just echo back last action since my
3050		 * response was lost.
3051		 */
3052		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3053	} else {
3054		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3055	}
3056}
3057
3058#ifdef __GNUC__
3059__attribute__((noinline))
3060#endif
3061	static int
3062	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
3063        struct sctp_stream_reset_out_req *sr_req)
3064{
3065	int chk_length, param_len, ptype;
3066	struct sctp_paramhdr pstore;
3067	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
3068
3069	uint32_t seq;
3070	int num_req = 0;
3071	int trunc = 0;
3072	struct sctp_tmit_chunk *chk;
3073	struct sctp_chunkhdr *ch;
3074	struct sctp_paramhdr *ph;
3075	int ret_code = 0;
3076	int num_param = 0;
3077
3078	/* now it may be a reset or a reset-response */
3079	chk_length = ntohs(sr_req->ch.chunk_length);
3080
3081	/* setup for adding the response */
3082	sctp_alloc_a_chunk(stcb, chk);
3083	if (chk == NULL) {
3084		return (ret_code);
3085	}
3086	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3087	chk->rec.chunk_id.can_take_data = 0;
3088	chk->asoc = &stcb->asoc;
3089	chk->no_fr_allowed = 0;
3090	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3091	chk->book_size_scale = 0;
3092	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3093	if (chk->data == NULL) {
3094strres_nochunk:
3095		if (chk->data) {
3096			sctp_m_freem(chk->data);
3097			chk->data = NULL;
3098		}
3099		sctp_free_a_chunk(stcb, chk);
3100		return (ret_code);
3101	}
3102	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3103
3104	/* setup chunk parameters */
3105	chk->sent = SCTP_DATAGRAM_UNSENT;
3106	chk->snd_count = 0;
3107	chk->whoTo = stcb->asoc.primary_destination;
3108	atomic_add_int(&chk->whoTo->ref_count, 1);
3109
3110	ch = mtod(chk->data, struct sctp_chunkhdr *);
3111	ch->chunk_type = SCTP_STREAM_RESET;
3112	ch->chunk_flags = 0;
3113	ch->chunk_length = htons(chk->send_size);
3114	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3115	offset += sizeof(struct sctp_chunkhdr);
3116	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3117		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
3118		if (ph == NULL)
3119			break;
3120		param_len = ntohs(ph->param_length);
3121		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3122			/* bad param */
3123			break;
3124		}
3125		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
3126		    (uint8_t *) & cstore);
3127		ptype = ntohs(ph->param_type);
3128		num_param++;
3129		if (param_len > (int)sizeof(cstore)) {
3130			trunc = 1;
3131		} else {
3132			trunc = 0;
3133		}
3134
3135		if (num_param > SCTP_MAX_RESET_PARAMS) {
3136			/* hit the max of parameters already sorry.. */
3137			break;
3138		}
3139		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3140			struct sctp_stream_reset_out_request *req_out;
3141
3142			req_out = (struct sctp_stream_reset_out_request *)ph;
3143			num_req++;
3144			if (stcb->asoc.stream_reset_outstanding) {
3145				seq = ntohl(req_out->response_seq);
3146				if (seq == stcb->asoc.str_reset_seq_out) {
3147					/* implicit ack */
3148					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3149				}
3150			}
3151			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
3152		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3153			struct sctp_stream_reset_in_request *req_in;
3154
3155			num_req++;
3156
3157
3158			req_in = (struct sctp_stream_reset_in_request *)ph;
3159
3160			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
3161		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3162			struct sctp_stream_reset_tsn_request *req_tsn;
3163
3164			num_req++;
3165			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3166
3167			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3168				ret_code = 1;
3169				goto strres_nochunk;
3170			}
3171			/* no more */
3172			break;
3173		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3174			struct sctp_stream_reset_response *resp;
3175			uint32_t result;
3176
3177			resp = (struct sctp_stream_reset_response *)ph;
3178			seq = ntohl(resp->response_seq);
3179			result = ntohl(resp->result);
3180			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3181				ret_code = 1;
3182				goto strres_nochunk;
3183			}
3184		} else {
3185			break;
3186		}
3187		offset += SCTP_SIZE32(param_len);
3188		chk_length -= SCTP_SIZE32(param_len);
3189	}
3190	if (num_req == 0) {
3191		/* we have no response free the stuff */
3192		goto strres_nochunk;
3193	}
3194	/* ok we have a chunk to link in */
3195	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3196	    chk,
3197	    sctp_next);
3198	stcb->asoc.ctrl_queue_cnt++;
3199	return (ret_code);
3200}
3201
3202/*
3203 * Handle a router or endpoints report of a packet loss, there are two ways
3204 * to handle this, either we get the whole packet and must disect it
3205 * ourselves (possibly with truncation and or corruption) or it is a summary
3206 * from a middle box that did the disectting for us.
3207 */
3208static void
3209sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3210    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3211{
3212	uint32_t bottle_bw, on_queue;
3213	uint16_t trunc_len;
3214	unsigned int chlen;
3215	unsigned int at;
3216	struct sctp_chunk_desc desc;
3217	struct sctp_chunkhdr *ch;
3218
3219	chlen = ntohs(cp->ch.chunk_length);
3220	chlen -= sizeof(struct sctp_pktdrop_chunk);
3221	/* XXX possible chlen underflow */
3222	if (chlen == 0) {
3223		ch = NULL;
3224		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3225			SCTP_STAT_INCR(sctps_pdrpbwrpt);
3226	} else {
3227		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3228		chlen -= sizeof(struct sctphdr);
3229		/* XXX possible chlen underflow */
3230		memset(&desc, 0, sizeof(desc));
3231	}
3232	trunc_len = (uint16_t) ntohs(cp->trunc_len);
3233	if (trunc_len > limit) {
3234		trunc_len = limit;
3235	}
3236	/* now the chunks themselves */
3237	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3238		desc.chunk_type = ch->chunk_type;
3239		/* get amount we need to move */
3240		at = ntohs(ch->chunk_length);
3241		if (at < sizeof(struct sctp_chunkhdr)) {
3242			/* corrupt chunk, maybe at the end? */
3243			SCTP_STAT_INCR(sctps_pdrpcrupt);
3244			break;
3245		}
3246		if (trunc_len == 0) {
3247			/* we are supposed to have all of it */
3248			if (at > chlen) {
3249				/* corrupt skip it */
3250				SCTP_STAT_INCR(sctps_pdrpcrupt);
3251				break;
3252			}
3253		} else {
3254			/* is there enough of it left ? */
3255			if (desc.chunk_type == SCTP_DATA) {
3256				if (chlen < (sizeof(struct sctp_data_chunk) +
3257				    sizeof(desc.data_bytes))) {
3258					break;
3259				}
3260			} else {
3261				if (chlen < sizeof(struct sctp_chunkhdr)) {
3262					break;
3263				}
3264			}
3265		}
3266		if (desc.chunk_type == SCTP_DATA) {
3267			/* can we get out the tsn? */
3268			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3269				SCTP_STAT_INCR(sctps_pdrpmbda);
3270
3271			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3272				/* yep */
3273				struct sctp_data_chunk *dcp;
3274				uint8_t *ddp;
3275				unsigned int iii;
3276
3277				dcp = (struct sctp_data_chunk *)ch;
3278				ddp = (uint8_t *) (dcp + 1);
3279				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3280					desc.data_bytes[iii] = ddp[iii];
3281				}
3282				desc.tsn_ifany = dcp->dp.tsn;
3283			} else {
3284				/* nope we are done. */
3285				SCTP_STAT_INCR(sctps_pdrpnedat);
3286				break;
3287			}
3288		} else {
3289			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3290				SCTP_STAT_INCR(sctps_pdrpmbct);
3291		}
3292
3293		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3294			SCTP_STAT_INCR(sctps_pdrppdbrk);
3295			break;
3296		}
3297		if (SCTP_SIZE32(at) > chlen) {
3298			break;
3299		}
3300		chlen -= SCTP_SIZE32(at);
3301		if (chlen < sizeof(struct sctp_chunkhdr)) {
3302			/* done, none left */
3303			break;
3304		}
3305		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3306	}
3307	/* Now update any rwnd --- possibly */
3308	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3309		/* From a peer, we get a rwnd report */
3310		uint32_t a_rwnd;
3311
3312		SCTP_STAT_INCR(sctps_pdrpfehos);
3313
3314		bottle_bw = ntohl(cp->bottle_bw);
3315		on_queue = ntohl(cp->current_onq);
3316		if (bottle_bw && on_queue) {
3317			/* a rwnd report is in here */
3318			if (bottle_bw > on_queue)
3319				a_rwnd = bottle_bw - on_queue;
3320			else
3321				a_rwnd = 0;
3322
3323			if (a_rwnd == 0)
3324				stcb->asoc.peers_rwnd = 0;
3325			else {
3326				if (a_rwnd > stcb->asoc.total_flight) {
3327					stcb->asoc.peers_rwnd =
3328					    a_rwnd - stcb->asoc.total_flight;
3329				} else {
3330					stcb->asoc.peers_rwnd = 0;
3331				}
3332				if (stcb->asoc.peers_rwnd <
3333				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3334					/* SWS sender side engages */
3335					stcb->asoc.peers_rwnd = 0;
3336				}
3337			}
3338		}
3339	} else {
3340		SCTP_STAT_INCR(sctps_pdrpfmbox);
3341	}
3342
3343	/* now middle boxes in sat networks get a cwnd bump */
3344	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3345	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
3346	    (stcb->asoc.sat_network)) {
3347		/*
3348		 * This is debateable but for sat networks it makes sense
3349		 * Note if a T3 timer has went off, we will prohibit any
3350		 * changes to cwnd until we exit the t3 loss recovery.
3351		 */
3352		uint32_t bw_avail;
3353		int rtt, incr;
3354
3355		int old_cwnd = net->cwnd;
3356
3357		/* need real RTT for this calc */
3358		rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
3359		/* get bottle neck bw */
3360		bottle_bw = ntohl(cp->bottle_bw);
3361		/* and whats on queue */
3362		on_queue = ntohl(cp->current_onq);
3363		/*
3364		 * adjust the on-queue if our flight is more it could be
3365		 * that the router has not yet gotten data "in-flight" to it
3366		 */
3367		if (on_queue < net->flight_size)
3368			on_queue = net->flight_size;
3369
3370		/* calculate the available space */
3371		bw_avail = (bottle_bw * rtt) / 1000;
3372		if (bw_avail > bottle_bw) {
3373			/*
3374			 * Cap the growth to no more than the bottle neck.
3375			 * This can happen as RTT slides up due to queues.
3376			 * It also means if you have more than a 1 second
3377			 * RTT with a empty queue you will be limited to the
3378			 * bottle_bw per second no matter if other points
3379			 * have 1/2 the RTT and you could get more out...
3380			 */
3381			bw_avail = bottle_bw;
3382		}
3383		if (on_queue > bw_avail) {
3384			/*
3385			 * No room for anything else don't allow anything
3386			 * else to be "added to the fire".
3387			 */
3388			int seg_inflight, seg_onqueue, my_portion;
3389
3390			net->partial_bytes_acked = 0;
3391
3392			/* how much are we over queue size? */
3393			incr = on_queue - bw_avail;
3394			if (stcb->asoc.seen_a_sack_this_pkt) {
3395				/*
3396				 * undo any cwnd adjustment that the sack
3397				 * might have made
3398				 */
3399				net->cwnd = net->prev_cwnd;
3400			}
3401			/* Now how much of that is mine? */
3402			seg_inflight = net->flight_size / net->mtu;
3403			seg_onqueue = on_queue / net->mtu;
3404			my_portion = (incr * seg_inflight) / seg_onqueue;
3405
3406			/* Have I made an adjustment already */
3407			if (net->cwnd > net->flight_size) {
3408				/*
3409				 * for this flight I made an adjustment we
3410				 * need to decrease the portion by a share
3411				 * our previous adjustment.
3412				 */
3413				int diff_adj;
3414
3415				diff_adj = net->cwnd - net->flight_size;
3416				if (diff_adj > my_portion)
3417					my_portion = 0;
3418				else
3419					my_portion -= diff_adj;
3420			}
3421			/*
3422			 * back down to the previous cwnd (assume we have
3423			 * had a sack before this packet). minus what ever
3424			 * portion of the overage is my fault.
3425			 */
3426			net->cwnd -= my_portion;
3427
3428			/* we will NOT back down more than 1 MTU */
3429			if (net->cwnd <= net->mtu) {
3430				net->cwnd = net->mtu;
3431			}
3432			/* force into CA */
3433			net->ssthresh = net->cwnd - 1;
3434		} else {
3435			/*
3436			 * Take 1/4 of the space left or max burst up ..
3437			 * whichever is less.
3438			 */
3439			incr = min((bw_avail - on_queue) >> 2,
3440			    stcb->asoc.max_burst * net->mtu);
3441			net->cwnd += incr;
3442		}
3443		if (net->cwnd > bw_avail) {
3444			/* We can't exceed the pipe size */
3445			net->cwnd = bw_avail;
3446		}
3447		if (net->cwnd < net->mtu) {
3448			/* We always have 1 MTU */
3449			net->cwnd = net->mtu;
3450		}
3451		if (net->cwnd - old_cwnd != 0) {
3452			/* log only changes */
3453			if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
3454				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
3455				    SCTP_CWND_LOG_FROM_SAT);
3456			}
3457		}
3458	}
3459}
3460
3461/*
3462 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
3463 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
3464 * offset: offset into the mbuf chain to first chunkhdr - length: is the
3465 * length of the complete packet outputs: - length: modified to remaining
3466 * length after control processing - netp: modified to new sctp_nets after
3467 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
3468 * bad packet,...) otherwise return the tcb for this packet
3469 */
3470#ifdef __GNUC__
3471__attribute__((noinline))
3472#endif
3473	static struct sctp_tcb *
3474	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3475             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3476             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3477             uint32_t vrf_id)
3478{
3479	struct sctp_association *asoc;
3480	uint32_t vtag_in;
3481	int num_chunks = 0;	/* number of control chunks processed */
3482	uint32_t chk_length;
3483	int ret;
3484	int abort_no_unlock = 0;
3485
3486	/*
3487	 * How big should this be, and should it be alloc'd? Lets try the
3488	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3489	 * until we get into jumbo grams and such..
3490	 */
3491	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3492	struct sctp_tcb *locked_tcb = stcb;
3493	int got_auth = 0;
3494	uint32_t auth_offset = 0, auth_len = 0;
3495	int auth_skipped = 0;
3496
3497	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3498	    iphlen, *offset, length, stcb);
3499
3500	/* validate chunk header length... */
3501	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3502		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3503		    ntohs(ch->chunk_length));
3504		if (locked_tcb) {
3505			SCTP_TCB_UNLOCK(locked_tcb);
3506		}
3507		return (NULL);
3508	}
3509	/*
3510	 * validate the verification tag
3511	 */
3512	vtag_in = ntohl(sh->v_tag);
3513
3514	if (locked_tcb) {
3515		SCTP_TCB_LOCK_ASSERT(locked_tcb);
3516	}
3517	if (ch->chunk_type == SCTP_INITIATION) {
3518		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
3519		    ntohs(ch->chunk_length), vtag_in);
3520		if (vtag_in != 0) {
3521			/* protocol error- silently discard... */
3522			SCTP_STAT_INCR(sctps_badvtag);
3523			if (locked_tcb) {
3524				SCTP_TCB_UNLOCK(locked_tcb);
3525			}
3526			return (NULL);
3527		}
3528	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3529		/*
3530		 * If there is no stcb, skip the AUTH chunk and process
3531		 * later after a stcb is found (to validate the lookup was
3532		 * valid.
3533		 */
3534		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3535		    (stcb == NULL) && !sctp_auth_disable) {
3536			/* save this chunk for later processing */
3537			auth_skipped = 1;
3538			auth_offset = *offset;
3539			auth_len = ntohs(ch->chunk_length);
3540
3541			/* (temporarily) move past this chunk */
3542			*offset += SCTP_SIZE32(auth_len);
3543			if (*offset >= length) {
3544				/* no more data left in the mbuf chain */
3545				*offset = length;
3546				if (locked_tcb) {
3547					SCTP_TCB_UNLOCK(locked_tcb);
3548				}
3549				return (NULL);
3550			}
3551			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3552			    sizeof(struct sctp_chunkhdr), chunk_buf);
3553		}
3554		if (ch == NULL) {
3555			/* Help */
3556			*offset = length;
3557			if (locked_tcb) {
3558				SCTP_TCB_UNLOCK(locked_tcb);
3559			}
3560			return (NULL);
3561		}
3562		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3563			goto process_control_chunks;
3564		}
3565		/*
3566		 * first check if it's an ASCONF with an unknown src addr we
3567		 * need to look inside to find the association
3568		 */
3569		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3570			/* inp's refcount may be reduced */
3571			SCTP_INP_INCR_REF(inp);
3572
3573			stcb = sctp_findassociation_ep_asconf(m, iphlen,
3574			    *offset, sh, &inp, netp);
3575			if (stcb == NULL) {
3576				/*
3577				 * reduce inp's refcount if not reduced in
3578				 * sctp_findassociation_ep_asconf().
3579				 */
3580				SCTP_INP_DECR_REF(inp);
3581			}
3582			/* now go back and verify any auth chunk to be sure */
3583			if (auth_skipped && (stcb != NULL)) {
3584				struct sctp_auth_chunk *auth;
3585
3586				auth = (struct sctp_auth_chunk *)
3587				    sctp_m_getptr(m, auth_offset,
3588				    auth_len, chunk_buf);
3589				got_auth = 1;
3590				auth_skipped = 0;
3591				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3592				    auth_offset)) {
3593					/* auth HMAC failed so dump it */
3594					*offset = length;
3595					if (locked_tcb) {
3596						SCTP_TCB_UNLOCK(locked_tcb);
3597					}
3598					return (NULL);
3599				} else {
3600					/* remaining chunks are HMAC checked */
3601					stcb->asoc.authenticated = 1;
3602				}
3603			}
3604		}
3605		if (stcb == NULL) {
3606			/* no association, so it's out of the blue... */
3607			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3608			    vrf_id);
3609			*offset = length;
3610			if (locked_tcb) {
3611				SCTP_TCB_UNLOCK(locked_tcb);
3612			}
3613			return (NULL);
3614		}
3615		asoc = &stcb->asoc;
3616		/* ABORT and SHUTDOWN can use either v_tag... */
3617		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3618		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3619		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
3620			if ((vtag_in == asoc->my_vtag) ||
3621			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
3622			    (vtag_in == asoc->peer_vtag))) {
3623				/* this is valid */
3624			} else {
3625				/* drop this packet... */
3626				SCTP_STAT_INCR(sctps_badvtag);
3627				if (locked_tcb) {
3628					SCTP_TCB_UNLOCK(locked_tcb);
3629				}
3630				return (NULL);
3631			}
3632		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
3633			if (vtag_in != asoc->my_vtag) {
3634				/*
3635				 * this could be a stale SHUTDOWN-ACK or the
3636				 * peer never got the SHUTDOWN-COMPLETE and
3637				 * is still hung; we have started a new asoc
3638				 * but it won't complete until the shutdown
3639				 * is completed
3640				 */
3641				if (locked_tcb) {
3642					SCTP_TCB_UNLOCK(locked_tcb);
3643				}
3644				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
3645				    NULL, vrf_id);
3646				return (NULL);
3647			}
3648		} else {
3649			/* for all other chunks, vtag must match */
3650			if (vtag_in != asoc->my_vtag) {
3651				/* invalid vtag... */
3652				SCTPDBG(SCTP_DEBUG_INPUT3,
3653				    "invalid vtag: %xh, expect %xh\n",
3654				    vtag_in, asoc->my_vtag);
3655				SCTP_STAT_INCR(sctps_badvtag);
3656				if (locked_tcb) {
3657					SCTP_TCB_UNLOCK(locked_tcb);
3658				}
3659				*offset = length;
3660				return (NULL);
3661			}
3662		}
3663	}			/* end if !SCTP_COOKIE_ECHO */
3664	/*
3665	 * process all control chunks...
3666	 */
3667	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
3668	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
3669	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
3670		/* implied cookie-ack.. we must have lost the ack */
3671		stcb->asoc.overall_error_count = 0;
3672		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
3673		    *netp);
3674	}
3675process_control_chunks:
3676	while (IS_SCTP_CONTROL(ch)) {
3677		/* validate chunk length */
3678		chk_length = ntohs(ch->chunk_length);
3679		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
3680		    ch->chunk_type, chk_length);
3681		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
3682		if (chk_length < sizeof(*ch) ||
3683		    (*offset + (int)chk_length) > length) {
3684			*offset = length;
3685			if (locked_tcb) {
3686				SCTP_TCB_UNLOCK(locked_tcb);
3687			}
3688			return (NULL);
3689		}
3690		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
3691		/*
3692		 * INIT-ACK only gets the init ack "header" portion only
3693		 * because we don't have to process the peer's COOKIE. All
3694		 * others get a complete chunk.
3695		 */
3696		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
3697		    (ch->chunk_type == SCTP_INITIATION)) {
3698			/* get an init-ack chunk */
3699			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3700			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
3701			if (ch == NULL) {
3702				*offset = length;
3703				if (locked_tcb) {
3704					SCTP_TCB_UNLOCK(locked_tcb);
3705				}
3706				return (NULL);
3707			}
3708		} else {
3709			/* For cookies and all other chunks. */
3710			if (chk_length > sizeof(chunk_buf)) {
3711				/*
3712				 * use just the size of the chunk buffer so
3713				 * the front part of our chunks fit in
3714				 * contiguous space up to the chunk buffer
3715				 * size (508 bytes). For chunks that need to
3716				 * get more than that they must use the
3717				 * sctp_m_getptr() function or other means
3718				 * (e.g. know how to parse mbuf chains).
3719				 * Cookies do this already.
3720				 */
3721				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3722				    (sizeof(chunk_buf) - 4),
3723				    chunk_buf);
3724				if (ch == NULL) {
3725					*offset = length;
3726					if (locked_tcb) {
3727						SCTP_TCB_UNLOCK(locked_tcb);
3728					}
3729					return (NULL);
3730				}
3731			} else {
3732				/* We can fit it all */
3733				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3734				    chk_length, chunk_buf);
3735				if (ch == NULL) {
3736					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
3737					*offset = length;
3738					if (locked_tcb) {
3739						SCTP_TCB_UNLOCK(locked_tcb);
3740					}
3741					return (NULL);
3742				}
3743			}
3744		}
3745		num_chunks++;
3746		/* Save off the last place we got a control from */
3747		if (stcb != NULL) {
3748			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
3749				/*
3750				 * allow last_control to be NULL if
3751				 * ASCONF... ASCONF processing will find the
3752				 * right net later
3753				 */
3754				if ((netp != NULL) && (*netp != NULL))
3755					stcb->asoc.last_control_chunk_from = *netp;
3756			}
3757		}
3758#ifdef SCTP_AUDITING_ENABLED
3759		sctp_audit_log(0xB0, ch->chunk_type);
3760#endif
3761
3762		/* check to see if this chunk required auth, but isn't */
3763		if ((stcb != NULL) && !sctp_auth_disable &&
3764		    sctp_auth_is_required_chunk(ch->chunk_type,
3765		    stcb->asoc.local_auth_chunks) &&
3766		    !stcb->asoc.authenticated) {
3767			/* "silently" ignore */
3768			SCTP_STAT_INCR(sctps_recvauthmissing);
3769			goto next_chunk;
3770		}
3771		switch (ch->chunk_type) {
3772		case SCTP_INITIATION:
3773			/* must be first and only chunk */
3774			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
3775			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3776				/* We are not interested anymore? */
3777				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3778					/*
3779					 * collision case where we are
3780					 * sending to them too
3781					 */
3782					;
3783				} else {
3784					if (locked_tcb) {
3785						SCTP_TCB_UNLOCK(locked_tcb);
3786					}
3787					*offset = length;
3788					return (NULL);
3789				}
3790			}
3791			if ((num_chunks > 1) ||
3792			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3793				*offset = length;
3794				if (locked_tcb) {
3795					SCTP_TCB_UNLOCK(locked_tcb);
3796				}
3797				return (NULL);
3798			}
3799			if ((stcb != NULL) &&
3800			    (SCTP_GET_STATE(&stcb->asoc) ==
3801			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
3802				sctp_send_shutdown_ack(stcb,
3803				    stcb->asoc.primary_destination);
3804				*offset = length;
3805				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3806				if (locked_tcb) {
3807					SCTP_TCB_UNLOCK(locked_tcb);
3808				}
3809				return (NULL);
3810			}
3811			if (netp) {
3812				sctp_handle_init(m, iphlen, *offset, sh,
3813				    (struct sctp_init_chunk *)ch, inp,
3814				    stcb, *netp, &abort_no_unlock, vrf_id);
3815			}
3816			if (abort_no_unlock)
3817				return (NULL);
3818
3819			*offset = length;
3820			if (locked_tcb) {
3821				SCTP_TCB_UNLOCK(locked_tcb);
3822			}
3823			return (NULL);
3824			break;
3825		case SCTP_PAD_CHUNK:
3826			break;
3827		case SCTP_INITIATION_ACK:
3828			/* must be first and only chunk */
3829			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
3830			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3831				/* We are not interested anymore */
3832				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3833					;
3834				} else {
3835					if (locked_tcb) {
3836						SCTP_TCB_UNLOCK(locked_tcb);
3837					}
3838					*offset = length;
3839					if (stcb) {
3840						sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3841					}
3842					return (NULL);
3843				}
3844			}
3845			if ((num_chunks > 1) ||
3846			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3847				*offset = length;
3848				if (locked_tcb) {
3849					SCTP_TCB_UNLOCK(locked_tcb);
3850				}
3851				return (NULL);
3852			}
3853			if ((netp) && (*netp)) {
3854				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
3855				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
3856			} else {
3857				ret = -1;
3858			}
3859			/*
3860			 * Special case, I must call the output routine to
3861			 * get the cookie echoed
3862			 */
3863			if (abort_no_unlock)
3864				return (NULL);
3865
3866			if ((stcb) && ret == 0)
3867				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3868			*offset = length;
3869			if (locked_tcb) {
3870				SCTP_TCB_UNLOCK(locked_tcb);
3871			}
3872			return (NULL);
3873			break;
3874		case SCTP_SELECTIVE_ACK:
3875			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
3876			SCTP_STAT_INCR(sctps_recvsacks);
3877			{
3878				struct sctp_sack_chunk *sack;
3879				int abort_now = 0;
3880				uint32_t a_rwnd, cum_ack;
3881				uint16_t num_seg;
3882				int nonce_sum_flag;
3883
3884				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
3885					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
3886					*offset = length;
3887					if (locked_tcb) {
3888						SCTP_TCB_UNLOCK(locked_tcb);
3889					}
3890					return (NULL);
3891				}
3892				sack = (struct sctp_sack_chunk *)ch;
3893				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
3894				cum_ack = ntohl(sack->sack.cum_tsn_ack);
3895				num_seg = ntohs(sack->sack.num_gap_ack_blks);
3896				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
3897				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
3898				    cum_ack,
3899				    num_seg,
3900				    a_rwnd
3901				    );
3902				stcb->asoc.seen_a_sack_this_pkt = 1;
3903				if ((stcb->asoc.pr_sctp_cnt == 0) &&
3904				    (num_seg == 0) &&
3905				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
3906				    (cum_ack == stcb->asoc.last_acked_seq)) &&
3907				    (stcb->asoc.saw_sack_with_frags == 0) &&
3908				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
3909				    ) {
3910					/*
3911					 * We have a SIMPLE sack having no
3912					 * prior segments and data on sent
3913					 * queue to be acked.. Use the
3914					 * faster path sack processing. We
3915					 * also allow window update sacks
3916					 * with no missing segments to go
3917					 * this way too.
3918					 */
3919					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
3920					    &abort_now);
3921				} else {
3922					if (netp && *netp)
3923						sctp_handle_sack(m, *offset,
3924						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
3925				}
3926				if (abort_now) {
3927					/* ABORT signal from sack processing */
3928					*offset = length;
3929					return (NULL);
3930				}
3931			}
3932			break;
3933		case SCTP_HEARTBEAT_REQUEST:
3934			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
3935			if ((stcb) && netp && *netp) {
3936				SCTP_STAT_INCR(sctps_recvheartbeat);
3937				sctp_send_heartbeat_ack(stcb, m, *offset,
3938				    chk_length, *netp);
3939
3940				/* He's alive so give him credit */
3941				stcb->asoc.overall_error_count = 0;
3942			}
3943			break;
3944		case SCTP_HEARTBEAT_ACK:
3945			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
3946			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
3947				/* Its not ours */
3948				*offset = length;
3949				if (locked_tcb) {
3950					SCTP_TCB_UNLOCK(locked_tcb);
3951				}
3952				return (NULL);
3953			}
3954			/* He's alive so give him credit */
3955			stcb->asoc.overall_error_count = 0;
3956			SCTP_STAT_INCR(sctps_recvheartbeatack);
3957			if (netp && *netp)
3958				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
3959				    stcb, *netp);
3960			break;
3961		case SCTP_ABORT_ASSOCIATION:
3962			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
3963			    stcb);
3964			if ((stcb) && netp && *netp)
3965				sctp_handle_abort((struct sctp_abort_chunk *)ch,
3966				    stcb, *netp);
3967			*offset = length;
3968			return (NULL);
3969			break;
3970		case SCTP_SHUTDOWN:
3971			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
3972			    stcb);
3973			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
3974				*offset = length;
3975				if (locked_tcb) {
3976					SCTP_TCB_UNLOCK(locked_tcb);
3977				}
3978				return (NULL);
3979
3980			}
3981			if (netp && *netp) {
3982				int abort_flag = 0;
3983
3984				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
3985				    stcb, *netp, &abort_flag);
3986				if (abort_flag) {
3987					*offset = length;
3988					return (NULL);
3989				}
3990			}
3991			break;
3992		case SCTP_SHUTDOWN_ACK:
3993			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
3994			if ((stcb) && (netp) && (*netp))
3995				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
3996			*offset = length;
3997			return (NULL);
3998			break;
3999
4000		case SCTP_OPERATION_ERROR:
4001			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4002			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4003
4004				*offset = length;
4005				return (NULL);
4006			}
4007			break;
4008		case SCTP_COOKIE_ECHO:
4009			SCTPDBG(SCTP_DEBUG_INPUT3,
4010			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4011			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4012				;
4013			} else {
4014				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4015					/* We are not interested anymore */
4016					*offset = length;
4017					return (NULL);
4018				}
4019			}
4020			/*
4021			 * First are we accepting? We do this again here
4022			 * sincen it is possible that a previous endpoint
4023			 * WAS listening responded to a INIT-ACK and then
4024			 * closed. We opened and bound.. and are now no
4025			 * longer listening.
4026			 */
4027			if (inp->sctp_socket->so_qlimit == 0) {
4028				if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4029					/*
4030					 * special case, is this a retran'd
4031					 * COOKIE-ECHO or a restarting assoc
4032					 * that is a peeled off or
4033					 * one-to-one style socket.
4034					 */
4035					goto process_cookie_anyway;
4036				}
4037				sctp_abort_association(inp, stcb, m, iphlen,
4038				    sh, NULL, vrf_id);
4039				*offset = length;
4040				return (NULL);
4041			} else if (inp->sctp_socket->so_qlimit) {
4042				/* we are accepting so check limits like TCP */
4043				if (inp->sctp_socket->so_qlen >
4044				    inp->sctp_socket->so_qlimit) {
4045					/* no space */
4046					struct mbuf *oper;
4047					struct sctp_paramhdr *phdr;
4048
4049					if (sctp_abort_if_one_2_one_hits_limit) {
4050						oper = NULL;
4051						oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4052						    0, M_DONTWAIT, 1, MT_DATA);
4053						if (oper) {
4054							SCTP_BUF_LEN(oper) =
4055							    sizeof(struct sctp_paramhdr);
4056							phdr = mtod(oper,
4057							    struct sctp_paramhdr *);
4058							phdr->param_type =
4059							    htons(SCTP_CAUSE_OUT_OF_RESC);
4060							phdr->param_length =
4061							    htons(sizeof(struct sctp_paramhdr));
4062						}
4063						sctp_abort_association(inp, stcb, m,
4064						    iphlen, sh, oper, vrf_id);
4065					}
4066					*offset = length;
4067					return (NULL);
4068				}
4069			}
4070	process_cookie_anyway:
4071			{
4072				struct mbuf *ret_buf;
4073				struct sctp_inpcb *linp;
4074
4075				if (stcb) {
4076					linp = NULL;
4077				} else {
4078					linp = inp;
4079				}
4080
4081				if (linp) {
4082					SCTP_ASOC_CREATE_LOCK(linp);
4083				}
4084				if (netp) {
4085					ret_buf =
4086					    sctp_handle_cookie_echo(m, iphlen,
4087					    *offset, sh,
4088					    (struct sctp_cookie_echo_chunk *)ch,
4089					    &inp, &stcb, netp,
4090					    auth_skipped,
4091					    auth_offset,
4092					    auth_len,
4093					    &locked_tcb,
4094					    vrf_id);
4095				} else {
4096					ret_buf = NULL;
4097				}
4098				if (linp) {
4099					SCTP_ASOC_CREATE_UNLOCK(linp);
4100				}
4101				if (ret_buf == NULL) {
4102					if (locked_tcb) {
4103						SCTP_TCB_UNLOCK(locked_tcb);
4104					}
4105					SCTPDBG(SCTP_DEBUG_INPUT3,
4106					    "GAK, null buffer\n");
4107					auth_skipped = 0;
4108					*offset = length;
4109					return (NULL);
4110				}
4111				/* if AUTH skipped, see if it verified... */
4112				if (auth_skipped) {
4113					got_auth = 1;
4114					auth_skipped = 0;
4115				}
4116				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4117					/*
4118					 * Restart the timer if we have
4119					 * pending data
4120					 */
4121					struct sctp_tmit_chunk *chk;
4122
4123					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4124					if (chk) {
4125						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4126						    stcb->sctp_ep, stcb,
4127						    chk->whoTo);
4128					}
4129				}
4130			}
4131			break;
4132		case SCTP_COOKIE_ACK:
4133			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4134			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4135				if (locked_tcb) {
4136					SCTP_TCB_UNLOCK(locked_tcb);
4137				}
4138				return (NULL);
4139			}
4140			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4141				/* We are not interested anymore */
4142				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4143					;
4144				} else if (stcb) {
4145					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4146					*offset = length;
4147					return (NULL);
4148				}
4149			}
4150			/* He's alive so give him credit */
4151			if ((stcb) && netp && *netp) {
4152				stcb->asoc.overall_error_count = 0;
4153				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4154			}
4155			break;
4156		case SCTP_ECN_ECHO:
4157			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4158			/* He's alive so give him credit */
4159			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4160				/* Its not ours */
4161				if (locked_tcb) {
4162					SCTP_TCB_UNLOCK(locked_tcb);
4163				}
4164				*offset = length;
4165				return (NULL);
4166			}
4167			if (stcb) {
4168				stcb->asoc.overall_error_count = 0;
4169				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4170				    stcb);
4171			}
4172			break;
4173		case SCTP_ECN_CWR:
4174			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4175			/* He's alive so give him credit */
4176			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4177				/* Its not ours */
4178				if (locked_tcb) {
4179					SCTP_TCB_UNLOCK(locked_tcb);
4180				}
4181				*offset = length;
4182				return (NULL);
4183			}
4184			if (stcb) {
4185				stcb->asoc.overall_error_count = 0;
4186				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4187			}
4188			break;
4189		case SCTP_SHUTDOWN_COMPLETE:
4190			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4191			/* must be first and only chunk */
4192			if ((num_chunks > 1) ||
4193			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4194				*offset = length;
4195				if (locked_tcb) {
4196					SCTP_TCB_UNLOCK(locked_tcb);
4197				}
4198				return (NULL);
4199			}
4200			if ((stcb) && netp && *netp) {
4201				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4202				    stcb, *netp);
4203			}
4204			*offset = length;
4205			return (NULL);
4206			break;
4207		case SCTP_ASCONF:
4208			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4209			/* He's alive so give him credit */
4210			if (stcb) {
4211				stcb->asoc.overall_error_count = 0;
4212				sctp_handle_asconf(m, *offset,
4213				    (struct sctp_asconf_chunk *)ch, stcb);
4214			}
4215			break;
4216		case SCTP_ASCONF_ACK:
4217			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4218			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4219				/* Its not ours */
4220				if (locked_tcb) {
4221					SCTP_TCB_UNLOCK(locked_tcb);
4222				}
4223				*offset = length;
4224				return (NULL);
4225			}
4226			if ((stcb) && netp && *netp) {
4227				/* He's alive so give him credit */
4228				stcb->asoc.overall_error_count = 0;
4229				sctp_handle_asconf_ack(m, *offset,
4230				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
4231			}
4232			break;
4233		case SCTP_FORWARD_CUM_TSN:
4234			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4235			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4236				/* Its not ours */
4237				if (locked_tcb) {
4238					SCTP_TCB_UNLOCK(locked_tcb);
4239				}
4240				*offset = length;
4241				return (NULL);
4242			}
4243			/* He's alive so give him credit */
4244			if (stcb) {
4245				int abort_flag = 0;
4246
4247				stcb->asoc.overall_error_count = 0;
4248				*fwd_tsn_seen = 1;
4249				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4250					/* We are not interested anymore */
4251					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
4252					*offset = length;
4253					return (NULL);
4254				}
4255				sctp_handle_forward_tsn(stcb,
4256				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
4257				if (abort_flag) {
4258					*offset = length;
4259					return (NULL);
4260				} else {
4261					stcb->asoc.overall_error_count = 0;
4262				}
4263
4264			}
4265			break;
4266		case SCTP_STREAM_RESET:
4267			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4268			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4269				/* Its not ours */
4270				if (locked_tcb) {
4271					SCTP_TCB_UNLOCK(locked_tcb);
4272				}
4273				*offset = length;
4274				return (NULL);
4275			}
4276			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4277				/* We are not interested anymore */
4278				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4279				*offset = length;
4280				return (NULL);
4281			}
4282			if (stcb->asoc.peer_supports_strreset == 0) {
4283				/*
4284				 * hmm, peer should have announced this, but
4285				 * we will turn it on since he is sending us
4286				 * a stream reset.
4287				 */
4288				stcb->asoc.peer_supports_strreset = 1;
4289			}
4290			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
4291				/* stop processing */
4292				*offset = length;
4293				return (NULL);
4294			}
4295			break;
4296		case SCTP_PACKET_DROPPED:
4297			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4298			/* re-get it all please */
4299			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4300				/* Its not ours */
4301				if (locked_tcb) {
4302					SCTP_TCB_UNLOCK(locked_tcb);
4303				}
4304				*offset = length;
4305				return (NULL);
4306			}
4307			if (ch && (stcb) && netp && (*netp)) {
4308				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4309				    stcb, *netp,
4310				    min(chk_length, (sizeof(chunk_buf) - 4)));
4311
4312			}
4313			break;
4314
4315		case SCTP_AUTHENTICATION:
4316			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4317			if (sctp_auth_disable)
4318				goto unknown_chunk;
4319
4320			if (stcb == NULL) {
4321				/* save the first AUTH for later processing */
4322				if (auth_skipped == 0) {
4323					auth_offset = *offset;
4324					auth_len = chk_length;
4325					auth_skipped = 1;
4326				}
4327				/* skip this chunk (temporarily) */
4328				goto next_chunk;
4329			}
4330			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4331			    (chk_length > (sizeof(struct sctp_auth_chunk) +
4332			    SCTP_AUTH_DIGEST_LEN_MAX))) {
4333				/* Its not ours */
4334				if (locked_tcb) {
4335					SCTP_TCB_UNLOCK(locked_tcb);
4336				}
4337				*offset = length;
4338				return (NULL);
4339			}
4340			if (got_auth == 1) {
4341				/* skip this chunk... it's already auth'd */
4342				goto next_chunk;
4343			}
4344			got_auth = 1;
4345			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4346			    m, *offset)) {
4347				/* auth HMAC failed so dump the packet */
4348				*offset = length;
4349				return (stcb);
4350			} else {
4351				/* remaining chunks are HMAC checked */
4352				stcb->asoc.authenticated = 1;
4353			}
4354			break;
4355
4356		default:
4357	unknown_chunk:
4358			/* it's an unknown chunk! */
4359			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4360				struct mbuf *mm;
4361				struct sctp_paramhdr *phd;
4362
4363				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4364				    0, M_DONTWAIT, 1, MT_DATA);
4365				if (mm) {
4366					phd = mtod(mm, struct sctp_paramhdr *);
4367					/*
4368					 * We cheat and use param type since
4369					 * we did not bother to define a
4370					 * error cause struct. They are the
4371					 * same basic format with different
4372					 * names.
4373					 */
4374					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4375					phd->param_length = htons(chk_length + sizeof(*phd));
4376					SCTP_BUF_LEN(mm) = sizeof(*phd);
4377					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4378					    M_DONTWAIT);
4379					if (SCTP_BUF_NEXT(mm)) {
4380						sctp_queue_op_err(stcb, mm);
4381					} else {
4382						sctp_m_freem(mm);
4383					}
4384				}
4385			}
4386			if ((ch->chunk_type & 0x80) == 0) {
4387				/* discard this packet */
4388				*offset = length;
4389				return (stcb);
4390			}	/* else skip this bad chunk and continue... */
4391			break;
4392		}		/* switch (ch->chunk_type) */
4393
4394
4395next_chunk:
4396		/* get the next chunk */
4397		*offset += SCTP_SIZE32(chk_length);
4398		if (*offset >= length) {
4399			/* no more data left in the mbuf chain */
4400			break;
4401		}
4402		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4403		    sizeof(struct sctp_chunkhdr), chunk_buf);
4404		if (ch == NULL) {
4405			if (locked_tcb) {
4406				SCTP_TCB_UNLOCK(locked_tcb);
4407			}
4408			*offset = length;
4409			return (NULL);
4410		}
4411	}			/* while */
4412	return (stcb);
4413}
4414
4415
4416/*
4417 * Process the ECN bits we have something set so we must look to see if it is
4418 * ECN(0) or ECN(1) or CE
4419 */
4420static void
4421sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4422    uint8_t ecn_bits)
4423{
4424	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4425		;
4426	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4427		/*
4428		 * we only add to the nonce sum for ECT1, ECT0 does not
4429		 * change the NS bit (that we have yet to find a way to send
4430		 * it yet).
4431		 */
4432
4433		/* ECN Nonce stuff */
4434		stcb->asoc.receiver_nonce_sum++;
4435		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
4436
4437		/*
4438		 * Drag up the last_echo point if cumack is larger since we
4439		 * don't want the point falling way behind by more than
4440		 * 2^^31 and then having it be incorrect.
4441		 */
4442		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4443		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4444			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4445		}
4446	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4447		/*
4448		 * Drag up the last_echo point if cumack is larger since we
4449		 * don't want the point falling way behind by more than
4450		 * 2^^31 and then having it be incorrect.
4451		 */
4452		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4453		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4454			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4455		}
4456	}
4457}
4458
4459static void
4460sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4461    uint32_t high_tsn, uint8_t ecn_bits)
4462{
4463	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4464		/*
4465		 * we possibly must notify the sender that a congestion
4466		 * window reduction is in order. We do this by adding a ECNE
4467		 * chunk to the output chunk queue. The incoming CWR will
4468		 * remove this chunk.
4469		 */
4470		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4471		    MAX_TSN)) {
4472			/* Yep, we need to add a ECNE */
4473			sctp_send_ecn_echo(stcb, net, high_tsn);
4474			stcb->asoc.last_echo_tsn = high_tsn;
4475		}
4476	}
4477}
4478
4479/*
4480 * common input chunk processing (v4 and v6)
4481 */
4482void
4483sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
4484    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
4485    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
4486    uint8_t ecn_bits, uint32_t vrf_id)
4487{
4488	/*
4489	 * Control chunk processing
4490	 */
4491	uint32_t high_tsn;
4492	int fwd_tsn_seen = 0, data_processed = 0;
4493	struct mbuf *m = *mm;
4494	int abort_flag = 0;
4495	int un_sent;
4496
4497	SCTP_STAT_INCR(sctps_recvdatagrams);
4498#ifdef SCTP_AUDITING_ENABLED
4499	sctp_audit_log(0xE0, 1);
4500	sctp_auditing(0, inp, stcb, net);
4501#endif
4502
4503	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
4504	    m, iphlen, offset);
4505
4506	if (stcb) {
4507		/* always clear this before beginning a packet */
4508		stcb->asoc.authenticated = 0;
4509		stcb->asoc.seen_a_sack_this_pkt = 0;
4510	}
4511	if (IS_SCTP_CONTROL(ch)) {
4512		/* process the control portion of the SCTP packet */
4513		/* sa_ignore NO_NULL_CHK */
4514		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
4515		    inp, stcb, &net, &fwd_tsn_seen, vrf_id);
4516		if (stcb) {
4517			/*
4518			 * This covers us if the cookie-echo was there and
4519			 * it changes our INP.
4520			 */
4521			inp = stcb->sctp_ep;
4522		}
4523	} else {
4524		/*
4525		 * no control chunks, so pre-process DATA chunks (these
4526		 * checks are taken care of by control processing)
4527		 */
4528
4529		/*
4530		 * if DATA only packet, and auth is required, then punt...
4531		 * can't have authenticated without any AUTH (control)
4532		 * chunks
4533		 */
4534		if ((stcb != NULL) && !sctp_auth_disable &&
4535		    sctp_auth_is_required_chunk(SCTP_DATA,
4536		    stcb->asoc.local_auth_chunks)) {
4537			/* "silently" ignore */
4538			SCTP_STAT_INCR(sctps_recvauthmissing);
4539			SCTP_TCB_UNLOCK(stcb);
4540			return;
4541		}
4542		if (stcb == NULL) {
4543			/* out of the blue DATA chunk */
4544			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4545			    vrf_id);
4546			return;
4547		}
4548		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
4549			/* v_tag mismatch! */
4550			SCTP_STAT_INCR(sctps_badvtag);
4551			SCTP_TCB_UNLOCK(stcb);
4552			return;
4553		}
4554	}
4555
4556	if (stcb == NULL) {
4557		/*
4558		 * no valid TCB for this packet, or we found it's a bad
4559		 * packet while processing control, or we're done with this
4560		 * packet (done or skip rest of data), so we drop it...
4561		 */
4562		return;
4563	}
4564	/*
4565	 * DATA chunk processing
4566	 */
4567	/* plow through the data chunks while length > offset */
4568
4569	/*
4570	 * Rest should be DATA only.  Check authentication state if AUTH for
4571	 * DATA is required.
4572	 */
4573	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
4574	    sctp_auth_is_required_chunk(SCTP_DATA,
4575	    stcb->asoc.local_auth_chunks) &&
4576	    !stcb->asoc.authenticated) {
4577		/* "silently" ignore */
4578		SCTP_STAT_INCR(sctps_recvauthmissing);
4579		SCTPDBG(SCTP_DEBUG_AUTH1,
4580		    "Data chunk requires AUTH, skipped\n");
4581		goto trigger_send;
4582	}
4583	if (length > offset) {
4584		int retval;
4585
4586		/*
4587		 * First check to make sure our state is correct. We would
4588		 * not get here unless we really did have a tag, so we don't
4589		 * abort if this happens, just dump the chunk silently.
4590		 */
4591		switch (SCTP_GET_STATE(&stcb->asoc)) {
4592		case SCTP_STATE_COOKIE_ECHOED:
4593			/*
4594			 * we consider data with valid tags in this state
4595			 * shows us the cookie-ack was lost. Imply it was
4596			 * there.
4597			 */
4598			stcb->asoc.overall_error_count = 0;
4599			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
4600			break;
4601		case SCTP_STATE_COOKIE_WAIT:
4602			/*
4603			 * We consider OOTB any data sent during asoc setup.
4604			 */
4605			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4606			    vrf_id);
4607			SCTP_TCB_UNLOCK(stcb);
4608			return;
4609			break;
4610		case SCTP_STATE_EMPTY:	/* should not happen */
4611		case SCTP_STATE_INUSE:	/* should not happen */
4612		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
4613		case SCTP_STATE_SHUTDOWN_ACK_SENT:
4614		default:
4615			SCTP_TCB_UNLOCK(stcb);
4616			return;
4617			break;
4618		case SCTP_STATE_OPEN:
4619		case SCTP_STATE_SHUTDOWN_SENT:
4620			break;
4621		}
4622		/* take care of ECN, part 1. */
4623		if (stcb->asoc.ecn_allowed &&
4624		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4625			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
4626		}
4627		/* plow through the data chunks while length > offset */
4628		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
4629		    inp, stcb, net, &high_tsn);
4630		if (retval == 2) {
4631			/*
4632			 * The association aborted, NO UNLOCK needed since
4633			 * the association is destroyed.
4634			 */
4635			return;
4636		}
4637		data_processed = 1;
4638		if (retval == 0) {
4639			/* take care of ecn part 2. */
4640			if (stcb->asoc.ecn_allowed &&
4641			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4642				sctp_process_ecn_marked_b(stcb, net, high_tsn,
4643				    ecn_bits);
4644			}
4645		}
4646		/*
4647		 * Anything important needs to have been m_copy'ed in
4648		 * process_data
4649		 */
4650	}
4651	if ((data_processed == 0) && (fwd_tsn_seen)) {
4652		int was_a_gap = 0;
4653
4654		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
4655		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
4656			/* there was a gap before this data was processed */
4657			was_a_gap = 1;
4658		}
4659		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
4660		if (abort_flag) {
4661			/* Again, we aborted so NO UNLOCK needed */
4662			return;
4663		}
4664	}
4665	/* trigger send of any chunks in queue... */
4666trigger_send:
4667#ifdef SCTP_AUDITING_ENABLED
4668	sctp_audit_log(0xE0, 2);
4669	sctp_auditing(1, inp, stcb, net);
4670#endif
4671	SCTPDBG(SCTP_DEBUG_INPUT1,
4672	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
4673	    stcb->asoc.peers_rwnd,
4674	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
4675	    stcb->asoc.total_flight);
4676	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
4677
4678	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
4679	    ((un_sent) &&
4680	    (stcb->asoc.peers_rwnd > 0 ||
4681	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
4682		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
4683		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
4684		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
4685	}
4686#ifdef SCTP_AUDITING_ENABLED
4687	sctp_audit_log(0xE0, 3);
4688	sctp_auditing(2, inp, stcb, net);
4689#endif
4690	SCTP_TCB_UNLOCK(stcb);
4691	return;
4692}
4693
4694
4695
4696void
4697sctp_input(i_pak, off)
4698	struct mbuf *i_pak;
4699	int off;
4700
4701{
4702#ifdef SCTP_MBUF_LOGGING
4703	struct mbuf *mat;
4704
4705#endif
4706	struct mbuf *m;
4707	int iphlen;
4708	uint32_t vrf_id = 0;
4709	uint8_t ecn_bits;
4710	struct ip *ip;
4711	struct sctphdr *sh;
4712	struct sctp_inpcb *inp = NULL;
4713
4714	uint32_t check, calc_check;
4715	struct sctp_nets *net;
4716	struct sctp_tcb *stcb = NULL;
4717	struct sctp_chunkhdr *ch;
4718	int refcount_up = 0;
4719	int length, mlen, offset;
4720
4721
4722	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
4723		SCTP_RELEASE_PKT(i_pak);
4724		return;
4725	}
4726	mlen = SCTP_HEADER_LEN(i_pak);
4727	iphlen = off;
4728	m = SCTP_HEADER_TO_CHAIN(i_pak);
4729
4730	net = NULL;
4731	SCTP_STAT_INCR(sctps_recvpackets);
4732	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
4733
4734
4735#ifdef SCTP_MBUF_LOGGING
4736	/* Log in any input mbufs */
4737	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
4738		mat = m;
4739		while (mat) {
4740			if (SCTP_BUF_IS_EXTENDED(mat)) {
4741				sctp_log_mb(mat, SCTP_MBUF_INPUT);
4742			}
4743			mat = SCTP_BUF_NEXT(mat);
4744		}
4745	}
4746#endif
4747#ifdef  SCTP_PACKET_LOGGING
4748	if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
4749		sctp_packet_log(m, mlen);
4750#endif
4751	/*
4752	 * Must take out the iphlen, since mlen expects this (only effect lb
4753	 * case)
4754	 */
4755	mlen -= iphlen;
4756
4757	/*
4758	 * Get IP, SCTP, and first chunk header together in first mbuf.
4759	 */
4760	ip = mtod(m, struct ip *);
4761	offset = iphlen + sizeof(*sh) + sizeof(*ch);
4762	if (SCTP_BUF_LEN(m) < offset) {
4763		if ((m = m_pullup(m, offset)) == 0) {
4764			SCTP_STAT_INCR(sctps_hdrops);
4765			return;
4766		}
4767		ip = mtod(m, struct ip *);
4768	}
4769	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
4770	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
4771	SCTPDBG(SCTP_DEBUG_INPUT1,
4772	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);
4773
4774	/* SCTP does not allow broadcasts or multicasts */
4775	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
4776		goto bad;
4777	}
4778	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
4779		/*
4780		 * We only look at broadcast if its a front state, All
4781		 * others we will not have a tcb for anyway.
4782		 */
4783		goto bad;
4784	}
4785	/* validate SCTP checksum */
4786	if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
4787		/*
4788		 * we do NOT validate things from the loopback if the sysctl
4789		 * is set to 1.
4790		 */
4791		check = sh->checksum;	/* save incoming checksum */
4792		if ((check == 0) && (sctp_no_csum_on_loopback)) {
4793			/*
4794			 * special hook for where we got a local address
4795			 * somehow routed across a non IFT_LOOP type
4796			 * interface
4797			 */
4798			if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
4799				goto sctp_skip_csum_4;
4800		}
4801		sh->checksum = 0;	/* prepare for calc */
4802		calc_check = sctp_calculate_sum(m, &mlen, iphlen);
4803		if (calc_check != check) {
4804			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
4805			    calc_check, check, m, mlen, iphlen);
4806
4807			stcb = sctp_findassociation_addr(m, iphlen,
4808			    offset - sizeof(*ch),
4809			    sh, ch, &inp, &net,
4810			    vrf_id);
4811			if ((inp) && (stcb)) {
4812				sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
4813				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
4814			} else if ((inp != NULL) && (stcb == NULL)) {
4815				refcount_up = 1;
4816			}
4817			SCTP_STAT_INCR(sctps_badsum);
4818			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
4819			goto bad;
4820		}
4821		sh->checksum = calc_check;
4822	}
4823sctp_skip_csum_4:
4824	/* destination port of 0 is illegal, based on RFC2960. */
4825	if (sh->dest_port == 0) {
4826		SCTP_STAT_INCR(sctps_hdrops);
4827		goto bad;
4828	}
4829	/* validate mbuf chain length with IP payload length */
4830	if (mlen < (ip->ip_len - iphlen)) {
4831		SCTP_STAT_INCR(sctps_hdrops);
4832		goto bad;
4833	}
4834	/*
4835	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
4836	 * IP/SCTP/first chunk header...
4837	 */
4838	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
4839	    sh, ch, &inp, &net, vrf_id);
4840	/* inp's ref-count increased && stcb locked */
4841	if (inp == NULL) {
4842		struct sctp_init_chunk *init_chk, chunk_buf;
4843
4844		SCTP_STAT_INCR(sctps_noport);
4845#ifdef ICMP_BANDLIM
4846		/*
4847		 * we use the bandwidth limiting to protect against sending
4848		 * too many ABORTS all at once. In this case these count the
4849		 * same as an ICMP message.
4850		 */
4851		if (badport_bandlim(0) < 0)
4852			goto bad;
4853#endif				/* ICMP_BANDLIM */
4854		SCTPDBG(SCTP_DEBUG_INPUT1,
4855		    "Sending a ABORT from packet entry!\n");
4856		if (ch->chunk_type == SCTP_INITIATION) {
4857			/*
4858			 * we do a trick here to get the INIT tag, dig in
4859			 * and get the tag from the INIT and put it in the
4860			 * common header.
4861			 */
4862			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4863			    iphlen + sizeof(*sh), sizeof(*init_chk),
4864			    (uint8_t *) & chunk_buf);
4865			if (init_chk != NULL)
4866				sh->v_tag = init_chk->init.initiate_tag;
4867		}
4868		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4869			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
4870			goto bad;
4871		}
4872		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
4873			goto bad;
4874		}
4875		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
4876			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
4877		goto bad;
4878	} else if (stcb == NULL) {
4879		refcount_up = 1;
4880	}
4881#ifdef FAST_IPSEC
4882	/*
4883	 * I very much doubt any of the IPSEC stuff will work but I have no
4884	 * idea, so I will leave it in place.
4885	 */
4886
4887	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
4888		ipsec4stat.in_polvio++;
4889		SCTP_STAT_INCR(sctps_hdrops);
4890		goto bad;
4891	}
4892#endif				/* IPSEC */
4893
4894	/*
4895	 * common chunk processing
4896	 */
4897	length = ip->ip_len + iphlen;
4898	offset -= sizeof(struct sctp_chunkhdr);
4899
4900	ecn_bits = ip->ip_tos;
4901
4902	/* sa_ignore NO_NULL_CHK */
4903	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
4904	    inp, stcb, net, ecn_bits, vrf_id);
4905	/* inp's ref-count reduced && stcb unlocked */
4906	if (m) {
4907		sctp_m_freem(m);
4908	}
4909	if ((inp) && (refcount_up)) {
4910		/* reduce ref-count */
4911		SCTP_INP_WLOCK(inp);
4912		SCTP_INP_DECR_REF(inp);
4913		SCTP_INP_WUNLOCK(inp);
4914	}
4915	return;
4916bad:
4917	if (stcb) {
4918		SCTP_TCB_UNLOCK(stcb);
4919	}
4920	if ((inp) && (refcount_up)) {
4921		/* reduce ref-count */
4922		SCTP_INP_WLOCK(inp);
4923		SCTP_INP_DECR_REF(inp);
4924		SCTP_INP_WUNLOCK(inp);
4925	}
4926	if (m) {
4927		sctp_m_freem(m);
4928	}
4929	return;
4930}
4931