1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/11/sys/netinet/sctp_input.c 364651 2020-08-24 09:46:36Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_crc32.h>
50#if defined(INET) || defined(INET6)
51#include <netinet/udp.h>
52#endif
53#include <sys/smp.h>
54
55
56
57static void
58sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
59{
60	struct sctp_nets *net;
61
62	/*
63	 * This now not only stops all cookie timers it also stops any INIT
64	 * timers as well. This will make sure that the timers are stopped
65	 * in all collision cases.
66	 */
67	SCTP_TCB_LOCK_ASSERT(stcb);
68	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
69		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
70			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
71			    stcb->sctp_ep,
72			    stcb,
73			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
74		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
75			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
76			    stcb->sctp_ep,
77			    stcb,
78			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
79		}
80	}
81}
82
83/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    (void *)stcb);
	/*
	 * No existing association (stcb == NULL): hold the endpoint read
	 * lock while handling the INIT; it is released at outnow.
	 */
	if (stcb == NULL) {
		SCTP_INP_RLOCK(inp);
	}
	/* validate length */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		/*
		 * sctp_abort_association() tears down and unlocks an
		 * existing stcb, so the caller must not unlock it again.
		 */
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	/* An initiate tag of zero is a protocol violation (RFC 4960). */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* The advertised receiver window must be at least SCTP_MIN_RWND. */
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* Check the AUTH-related parameters (RANDOM/CHUNKS/HMAC-ALGO). */
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with AUTH parameters");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* We are only accepting if we have a socket with positive
	 * so_qlimit. */
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_socket == NULL) ||
	    (inp->sctp_socket->so_qlimit == 0))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
			    mflowtype, mflowid, inp->fibnum,
			    vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Duplicate INIT while closing: re-send our SHUTDOWN-ACK. */
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
		    src, dst, sh, cp,
		    mflowtype, mflowid,
		    vrf_id, port);
	}
outnow:
	/* Drop the endpoint read lock taken above for the no-assoc case. */
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
209
210/*
211 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
212 */
213
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int unsent_data;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/*
	 * This function returns if any stream has true unsent data on it.
	 * Note that as it looks through it will clean up any places that
	 * have old data that has been sent but left at top of stream queue.
	 */
	asoc = &stcb->asoc;
	unsent_data = 0;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/* sa_ignore FREED_MEMORY */
			/* Only the head entry of each stream can be stale. */
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing differed cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					/* Bookkeeping inconsistency; log it. */
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				/* Unlink the drained entry, then free it. */
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
				/* Anything behind the stale head is unsent. */
				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
					unsent_data++;
				}
			} else {
				unsent_data++;
			}
			/* One hit is enough; the result is used as a boolean. */
			if (unsent_data > 0) {
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}
284
/*
 * Digest the stream/TSN/tag parameters of a peer's INIT or INIT-ACK chunk
 * into the association: record the peer's vtag and rwnd, seed the TSN
 * tracking state, trim the outbound stream count down to what the peer
 * accepts, and (re)build the inbound stream array.
 *
 * Returns 0 on success, -1 if the inbound stream array cannot be
 * allocated.  NOTE(review): callers appear to hold the TCB lock; only the
 * send lock is taken explicitly here — confirm against callers.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's: one before the peer's initial TSN, i.e. "nothing seen yet" */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}

		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	/*
	 * The peer accepts fewer inbound streams than we pre-opened:
	 * abandon everything queued for the excess stream ids.
	 */
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* Drop already-chunked data destined for dropped streams. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.sid >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* Tell the ULP its datagram was never sent. */
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		/* Drain the per-stream pending queues of the dropped streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
				outs->state = SCTP_STREAM_CLOSED;
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* The surviving outbound streams are now all open. */
	asoc->streamoutcnt = asoc->pre_open_streams;
	if (asoc->strmout) {
		for (i = 0; i < asoc->streamoutcnt; i++) {
			asoc->strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		for (i = 0; i < asoc->streamincnt; i++) {
			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* inbound streams = min(our configured max, peer's outbound count) */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].sid = i;
		asoc->strmin[i].last_mid_delivered = 0xffffffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
		asoc->strmin[i].pd_api_started = 0;
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
432
433/*
434 * INIT-ACK message processing/consumption returns value < 0 on error
435 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag, cookie_found;
	int initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	cookie_found = 0;

	/*
	 * Scan the parameter list: this also reports whether the mandatory
	 * STATE-COOKIE was present and whether the peer is NAT-friendly,
	 * and may build an operational-error mbuf for unknown params.
	 */
	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp,
	    &nat_friendly, &cookie_found);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (!cookie_found) {
		uint16_t len;

		/* Only report the missing cookie parameter */
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
		/* We abort with an error of missing mandatory param */
		op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
		if (op_err != NULL) {
			struct sctp_error_missing_param *cause;

			SCTP_BUF_LEN(op_err) = len;
			cause = mtod(op_err, struct sctp_error_missing_param *);
			/* Subtract the reserved param */
			cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
			cause->cause.length = htons(len);
			cause->num_missing_params = htonl(1);
			cause->type[0] = htons(SCTP_STATE_COOKIE);
		}
		/* An op_err of NULL just means we abort without a cause. */
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-3);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t)nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
	    src, dst, NULL, stcb->asoc.port))) {
		/* Discard any pending unrecognized-param report first. */
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->asconf_supported == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}

	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* A valid INIT-ACK resets the error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);

	/* calculate the RTO */
	sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
	    SCTP_RTT_FROM_NON_DATA);
	retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net);
	return (retval);
}
565
566static void
567sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
568    struct sctp_tcb *stcb, struct sctp_nets *net)
569{
570	union sctp_sockstore store;
571	struct sctp_nets *r_net, *f_net;
572	struct timeval tv;
573	int req_prim = 0;
574	uint16_t old_error_counter;
575
576	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
577		/* Invalid length */
578		return;
579	}
580
581	memset(&store, 0, sizeof(store));
582	switch (cp->heartbeat.hb_info.addr_family) {
583#ifdef INET
584	case AF_INET:
585		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
586			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
587			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
588			store.sin.sin_port = stcb->rport;
589			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
590			    sizeof(store.sin.sin_addr));
591		} else {
592			return;
593		}
594		break;
595#endif
596#ifdef INET6
597	case AF_INET6:
598		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
599			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
600			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
601			store.sin6.sin6_port = stcb->rport;
602			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
603		} else {
604			return;
605		}
606		break;
607#endif
608	default:
609		return;
610	}
611	r_net = sctp_findnet(stcb, &store.sa);
612	if (r_net == NULL) {
613		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
614		return;
615	}
616	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
617	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
618	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
619		/*
620		 * If the its a HB and it's random value is correct when can
621		 * confirm the destination.
622		 */
623		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
624		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
625			stcb->asoc.primary_destination = r_net;
626			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
627			f_net = TAILQ_FIRST(&stcb->asoc.nets);
628			if (f_net != r_net) {
629				/*
630				 * first one on the list is NOT the primary
631				 * sctp_cmpaddr() is much more efficient if
632				 * the primary is the first on the list,
633				 * make it so.
634				 */
635				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
636				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
637			}
638			req_prim = 1;
639		}
640		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
641		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
642		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
643		    r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
644		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
645	}
646	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
647		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
648		    stcb->asoc.overall_error_count,
649		    0,
650		    SCTP_FROM_SCTP_INPUT,
651		    __LINE__);
652	}
653	stcb->asoc.overall_error_count = 0;
654	old_error_counter = r_net->error_count;
655	r_net->error_count = 0;
656	r_net->hb_responded = 1;
657	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
658	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
659	/* Now lets do a RTO with this */
660	sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
661	    SCTP_RTT_FROM_NON_DATA);
662	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
663		r_net->dest_state |= SCTP_ADDR_REACHABLE;
664		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
665		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
666	}
667	if (r_net->dest_state & SCTP_ADDR_PF) {
668		r_net->dest_state &= ~SCTP_ADDR_PF;
669		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
670	}
671	if (old_error_counter > 0) {
672		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
673		    stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
674		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
675	}
676	if (r_net == stcb->asoc.primary_destination) {
677		if (stcb->asoc.alternate) {
678			/* release the alternate, primary is good */
679			sctp_free_remote_addr(stcb->asoc.alternate);
680			stcb->asoc.alternate = NULL;
681		}
682	}
683	/* Mobility adaptation */
684	if (req_prim) {
685		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
686		    SCTP_MOBILITY_BASE) ||
687		    sctp_is_mobility_feature_on(stcb->sctp_ep,
688		    SCTP_MOBILITY_FASTHANDOFF)) &&
689		    sctp_is_mobility_feature_on(stcb->sctp_ep,
690		    SCTP_MOBILITY_PRIM_DELETED)) {
691
692			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
693			    stcb->sctp_ep, stcb, NULL,
694			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
695			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
696			    SCTP_MOBILITY_FASTHANDOFF)) {
697				sctp_assoc_immediate_retrans(stcb,
698				    stcb->asoc.primary_destination);
699			}
700			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
701			    SCTP_MOBILITY_BASE)) {
702				sctp_move_chunks_from_net(stcb,
703				    stcb->asoc.deleted_primary);
704			}
705			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
706			    stcb->asoc.deleted_primary);
707		}
708	}
709}
710
711static int
712sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
713{
714	/*
715	 * Return 0 means we want you to proceed with the abort non-zero
716	 * means no abort processing.
717	 */
718	uint32_t new_vtag;
719	struct sctpasochead *head;
720
721	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
722	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
723		new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
724		atomic_add_int(&stcb->asoc.refcnt, 1);
725		SCTP_TCB_UNLOCK(stcb);
726		SCTP_INP_INFO_WLOCK();
727		SCTP_TCB_LOCK(stcb);
728		atomic_subtract_int(&stcb->asoc.refcnt, 1);
729	} else {
730		return (0);
731	}
732	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
733		/* generate a new vtag and send init */
734		LIST_REMOVE(stcb, sctp_asocs);
735		stcb->asoc.my_vtag = new_vtag;
736		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
737		/*
738		 * put it in the bucket in the vtag hash of assoc's for the
739		 * system
740		 */
741		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
742		SCTP_INP_INFO_WUNLOCK();
743		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
744		return (1);
745	} else {
746		/*
747		 * treat like a case where the cookie expired i.e.: - dump
748		 * current cookie. - generate a new vtag. - resend init.
749		 */
750		/* generate a new vtag and send init */
751		LIST_REMOVE(stcb, sctp_asocs);
752		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
753		sctp_stop_all_cookie_timers(stcb);
754		sctp_toss_old_cookies(stcb, &stcb->asoc);
755		stcb->asoc.my_vtag = new_vtag;
756		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
757		/*
758		 * put it in the bucket in the vtag hash of assoc's for the
759		 * system
760		 */
761		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
762		SCTP_INP_INFO_WUNLOCK();
763		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
764		return (1);
765	}
766	return (0);
767}
768
769static int
770sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
771    struct sctp_nets *net)
772{
773	/*
774	 * return 0 means we want you to proceed with the abort non-zero
775	 * means no abort processing
776	 */
777	if (stcb->asoc.auth_supported == 0) {
778		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
779		return (0);
780	}
781	sctp_asconf_send_nat_state_update(stcb, net);
782	return (1);
783}
784
785
786/* Returns 1 if the stcb was aborted, 0 otherwise */
static int
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return (0);

	len = ntohs(abort->ch.chunk_length);
	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_error_cause *cause;

		cause = (struct sctp_error_cause *)(abort + 1);
		error = ntohs(cause->code);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    abort->ch.chunk_flags);
			/*
			 * Non-zero: the handler recovered (restarted the
			 * handshake), so do not tear down the assoc.
			 */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    abort->ch.chunk_flags);
			/* Non-zero: an ASCONF repair was sent; keep the assoc. */
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * NOTE(review): on these platforms the socket lock apparently must
	 * be held across sctp_free_assoc(); the refcount bump keeps the
	 * stcb alive while the TCB lock is dropped to acquire it.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
	return (1);
}
857
858static void
859sctp_start_net_timers(struct sctp_tcb *stcb)
860{
861	uint32_t cnt_hb_sent;
862	struct sctp_nets *net;
863
864	cnt_hb_sent = 0;
865	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
866		/*
867		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
868		 * If the dest in unconfirmed send a hb as well if under
869		 * max_hb_burst have been sent.
870		 */
871		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
872		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
873		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
874		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
875			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
876			cnt_hb_sent++;
877		}
878	}
879	if (cnt_hb_sent) {
880		sctp_chunk_output(stcb->sctp_ep, stcb,
881		    SCTP_OUTPUT_FROM_COOKIE_ACK,
882		    SCTP_SO_NOT_LOCKED);
883	}
884}
885
886
/*
 * Process an incoming SHUTDOWN chunk for an existing association.
 *
 * Validates state and chunk length, processes the cumulative TSN ack
 * carried in the chunk, terminates any partial-delivery (PD-API) receive
 * in progress, moves the association toward SHUTDOWN-RECEIVED, and once
 * no data remains queued sends a SHUTDOWN-ACK and enters
 * SHUTDOWN-ACK-SENT.  Sets *abort_flag if sctp_update_acked() aborted
 * the association (stcb must not be touched further in that case).
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;
	int old_state;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* A SHUTDOWN before the association is established is ignored. */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	}
	/* Remember the state before sctp_update_acked() may change it. */
	old_state = SCTP_GET_STATE(stcb);
	sctp_update_acked(stcb, cp, abort_flag);
	if (*abort_flag) {
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->control_pdapi->on_strm_q) {
			struct sctp_stream_in *strm;

			/* Unlink the partial record from its stream queue. */
			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
#ifdef INVARIANTS
			} else {
				panic("Unknown state on ctrl:%p on_strm_q:%d",
				    asoc->control_pdapi,
				    asoc->control_pdapi->on_strm_q);
#endif
			}
		}
		/* Mark the in-progress message as ended/aborted. */
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Drop the TCB lock, take the socket lock, then retake the
		 * TCB lock; hold a refcount across the window so the assoc
		 * cannot be freed out from under us.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* Wake any reader blocked on the now-ended record. */
		if (stcb->sctp_socket) {
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
		    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
		if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			sctp_send_shutdown_ack(stcb, net);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, net);
		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
			/* Already in ACK-SENT: retransmit the SHUTDOWN-ACK. */
			sctp_send_shutdown_ack(stcb, net);
		}
	}
}
1018
1019static void
1020sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
1021    struct sctp_tcb *stcb,
1022    struct sctp_nets *net)
1023{
1024	struct sctp_association *asoc;
1025#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1026	struct socket *so;
1027
1028	so = SCTP_INP_SO(stcb->sctp_ep);
1029#endif
1030	SCTPDBG(SCTP_DEBUG_INPUT2,
1031	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
1032	if (stcb == NULL)
1033		return;
1034
1035	asoc = &stcb->asoc;
1036	/* process according to association state */
1037	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1038	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1039		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
1040		sctp_send_shutdown_complete(stcb, net, 1);
1041		SCTP_TCB_UNLOCK(stcb);
1042		return;
1043	}
1044	if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
1045	    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1046		/* unexpected SHUTDOWN-ACK... so ignore... */
1047		SCTP_TCB_UNLOCK(stcb);
1048		return;
1049	}
1050	if (asoc->control_pdapi) {
1051		/*
1052		 * With a normal shutdown we assume the end of last record.
1053		 */
1054		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1055		asoc->control_pdapi->end_added = 1;
1056		asoc->control_pdapi->pdapi_aborted = 1;
1057		asoc->control_pdapi = NULL;
1058		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1059#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1060		atomic_add_int(&stcb->asoc.refcnt, 1);
1061		SCTP_TCB_UNLOCK(stcb);
1062		SCTP_SOCKET_LOCK(so, 1);
1063		SCTP_TCB_LOCK(stcb);
1064		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1065		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1066			/* assoc was freed while we were unlocked */
1067			SCTP_SOCKET_UNLOCK(so, 1);
1068			return;
1069		}
1070#endif
1071		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1072#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1073		SCTP_SOCKET_UNLOCK(so, 1);
1074#endif
1075	}
1076#ifdef INVARIANTS
1077	if (!TAILQ_EMPTY(&asoc->send_queue) ||
1078	    !TAILQ_EMPTY(&asoc->sent_queue) ||
1079	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
1080		panic("Queues are not empty when handling SHUTDOWN-ACK");
1081	}
1082#endif
1083	/* stop the timer */
1084	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
1085	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
1086	/* send SHUTDOWN-COMPLETE */
1087	sctp_send_shutdown_complete(stcb, net, 0);
1088	/* notify upper layer protocol */
1089	if (stcb->sctp_socket) {
1090		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1091		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1092			stcb->sctp_socket->so_snd.sb_cc = 0;
1093		}
1094		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1095	}
1096	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
1097	/* free the TCB but first save off the ep */
1098#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1099	atomic_add_int(&stcb->asoc.refcnt, 1);
1100	SCTP_TCB_UNLOCK(stcb);
1101	SCTP_SOCKET_LOCK(so, 1);
1102	SCTP_TCB_LOCK(stcb);
1103	atomic_subtract_int(&stcb->asoc.refcnt, 1);
1104#endif
1105	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1106	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1107#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1108	SCTP_SOCKET_UNLOCK(so, 1);
1109#endif
1110}
1111
1112static void
1113sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type,
1114    struct sctp_nets *net)
1115{
1116	switch (chunk_type) {
1117	case SCTP_ASCONF_ACK:
1118	case SCTP_ASCONF:
1119		sctp_asconf_cleanup(stcb, net);
1120		break;
1121	case SCTP_IFORWARD_CUM_TSN:
1122	case SCTP_FORWARD_CUM_TSN:
1123		stcb->asoc.prsctp_supported = 0;
1124		break;
1125	default:
1126		SCTPDBG(SCTP_DEBUG_INPUT2,
1127		    "Peer does not support chunk type %d (0x%x).\n",
1128		    chunk_type, chunk_type);
1129		break;
1130	}
1131}
1132
1133/*
1134 * Skip past the param header and then we will find the param that caused the
1135 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1136 * these will turn of specific features.
1137 * XXX: Is this the right thing to do?
1138 */
1139static void
1140sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
1141{
1142	switch (parameter_type) {
1143		/* pr-sctp draft */
1144	case SCTP_PRSCTP_SUPPORTED:
1145		stcb->asoc.prsctp_supported = 0;
1146		break;
1147	case SCTP_SUPPORTED_CHUNK_EXT:
1148		break;
1149		/* draft-ietf-tsvwg-addip-sctp */
1150	case SCTP_HAS_NAT_SUPPORT:
1151		stcb->asoc.peer_supports_nat = 0;
1152		break;
1153	case SCTP_ADD_IP_ADDRESS:
1154	case SCTP_DEL_IP_ADDRESS:
1155	case SCTP_SET_PRIM_ADDR:
1156		stcb->asoc.asconf_supported = 0;
1157		break;
1158	case SCTP_SUCCESS_REPORT:
1159	case SCTP_ERROR_CAUSE_IND:
1160		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1161		SCTPDBG(SCTP_DEBUG_INPUT2,
1162		    "Turning off ASCONF to this strange peer\n");
1163		stcb->asoc.asconf_supported = 0;
1164		break;
1165	default:
1166		SCTPDBG(SCTP_DEBUG_INPUT2,
1167		    "Peer does not support param type %d (0x%x)??\n",
1168		    parameter_type, parameter_type);
1169		break;
1170	}
1171}
1172
/*
 * Process an incoming OPERATION-ERROR chunk.
 *
 * Walks the list of error causes in the chunk (bounded by both the
 * chunk length and the caller-supplied 'limit'), reacting to each known
 * cause code, and finally notifies the ULP with the first cause code
 * seen.  Returns 0 normally; returns -1 only on the stale-cookie path
 * when max_init_times was exceeded and the association was freed (the
 * TCB must not be used by the caller on that path).
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	struct sctp_error_cause *cause;
	struct sctp_association *asoc;
	uint32_t remaining_length, adjust;
	uint16_t code, cause_code, cause_length;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	cause = (struct sctp_error_cause *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	/* Never walk past the chunk nor past what the caller validated. */
	remaining_length = ntohs(ch->chunk_length);
	if (remaining_length > limit) {
		remaining_length = limit;
	}
	if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
		remaining_length -= sizeof(struct sctp_chunkhdr);
	} else {
		remaining_length = 0;
	}
	/* 'code' records the first cause seen, for the ULP notification. */
	code = 0;
	while (remaining_length >= sizeof(struct sctp_error_cause)) {
		/* Process an Error Cause */
		cause_code = ntohs(cause->code);
		cause_length = ntohs(cause->length);
		if ((cause_length > remaining_length) || (cause_length == 0)) {
			/* Invalid cause length, possibly due to truncation. */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
			    remaining_length, cause_length);
			return (0);
		}
		if (code == 0) {
			/* report the first error cause */
			code = cause_code;
		}
		switch (cause_code) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
			    cause_code);
			break;
		case SCTP_CAUSE_NAT_COLLIDING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags: %x\n",
			    ch->chunk_flags);
			/* A non-zero return means the packet is consumed. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_NAT_MISSING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags: %x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				struct sctp_error_stale_cookie *stale_cookie;

				stale_cookie = (struct sctp_error_stale_cookie *)cause;
				asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time);
				/* Double it to be more robust on RTX */
				if (asoc->cookie_preserve_req <= UINT32_MAX / 2) {
					asoc->cookie_preserve_req *= 2;
				} else {
					asoc->cookie_preserve_req = UINT32_MAX;
				}
				asoc->stale_cookie_count++;
				/* Too many stale cookies: give up on the setup. */
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return (-1);
				}
				/* blast back to INIT state */
				sctp_toss_old_cookies(stcb, &stcb->asoc);
				SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't receive a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) {
				struct sctp_error_unrecognized_chunk *unrec_chunk;

				unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
				sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type, net);
			}
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			/* XXX: We only consider the first parameter */
			if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) {
				struct sctp_paramhdr *unrec_parameter;

				unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
				sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
			}
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
			    cause_code);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
			    cause_code);
			break;
		}
		/* Advance to the next cause, 4-byte padded per the spec. */
		adjust = SCTP_SIZE32(cause_length);
		if (remaining_length >= adjust) {
			remaining_length -= adjust;
		} else {
			remaining_length = 0;
		}
		cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
	}
	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED);
	return (0);
}
1352
1353static int
1354sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1355    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
1356    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1357    struct sctp_nets *net, int *abort_no_unlock,
1358    uint8_t mflowtype, uint32_t mflowid,
1359    uint32_t vrf_id)
1360{
1361	struct sctp_init_ack *init_ack;
1362	struct mbuf *op_err;
1363
1364	SCTPDBG(SCTP_DEBUG_INPUT2,
1365	    "sctp_handle_init_ack: handling INIT-ACK\n");
1366
1367	if (stcb == NULL) {
1368		SCTPDBG(SCTP_DEBUG_INPUT2,
1369		    "sctp_handle_init_ack: TCB is null\n");
1370		return (-1);
1371	}
1372	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1373		/* Invalid length */
1374		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1375		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1376		    src, dst, sh, op_err,
1377		    mflowtype, mflowid,
1378		    vrf_id, net->port);
1379		*abort_no_unlock = 1;
1380		return (-1);
1381	}
1382	init_ack = &cp->init;
1383	/* validate parameters */
1384	if (init_ack->initiate_tag == 0) {
1385		/* protocol error... send an abort */
1386		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1387		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1388		    src, dst, sh, op_err,
1389		    mflowtype, mflowid,
1390		    vrf_id, net->port);
1391		*abort_no_unlock = 1;
1392		return (-1);
1393	}
1394	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1395		/* protocol error... send an abort */
1396		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1397		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1398		    src, dst, sh, op_err,
1399		    mflowtype, mflowid,
1400		    vrf_id, net->port);
1401		*abort_no_unlock = 1;
1402		return (-1);
1403	}
1404	if (init_ack->num_inbound_streams == 0) {
1405		/* protocol error... send an abort */
1406		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1407		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1408		    src, dst, sh, op_err,
1409		    mflowtype, mflowid,
1410		    vrf_id, net->port);
1411		*abort_no_unlock = 1;
1412		return (-1);
1413	}
1414	if (init_ack->num_outbound_streams == 0) {
1415		/* protocol error... send an abort */
1416		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1417		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1418		    src, dst, sh, op_err,
1419		    mflowtype, mflowid,
1420		    vrf_id, net->port);
1421		*abort_no_unlock = 1;
1422		return (-1);
1423	}
1424	/* process according to association state... */
1425	switch (SCTP_GET_STATE(stcb)) {
1426	case SCTP_STATE_COOKIE_WAIT:
1427		/* this is the expected state for this chunk */
1428		/* process the INIT-ACK parameters */
1429		if (stcb->asoc.primary_destination->dest_state &
1430		    SCTP_ADDR_UNCONFIRMED) {
1431			/*
1432			 * The primary is where we sent the INIT, we can
1433			 * always consider it confirmed when the INIT-ACK is
1434			 * returned. Do this before we load addresses
1435			 * though.
1436			 */
1437			stcb->asoc.primary_destination->dest_state &=
1438			    ~SCTP_ADDR_UNCONFIRMED;
1439			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1440			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1441		}
1442		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
1443		    net, abort_no_unlock,
1444		    mflowtype, mflowid,
1445		    vrf_id) < 0) {
1446			/* error in parsing parameters */
1447			return (-1);
1448		}
1449		/* update our state */
1450		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1451		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);
1452
1453		/* reset the RTO calc */
1454		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1455			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1456			    stcb->asoc.overall_error_count,
1457			    0,
1458			    SCTP_FROM_SCTP_INPUT,
1459			    __LINE__);
1460		}
1461		stcb->asoc.overall_error_count = 0;
1462		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1463		/*
1464		 * collapse the init timer back in case of a exponential
1465		 * backoff
1466		 */
1467		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1468		    stcb, net);
1469		/*
1470		 * the send at the end of the inbound data processing will
1471		 * cause the cookie to be sent
1472		 */
1473		break;
1474	case SCTP_STATE_SHUTDOWN_SENT:
1475		/* incorrect state... discard */
1476		break;
1477	case SCTP_STATE_COOKIE_ECHOED:
1478		/* incorrect state... discard */
1479		break;
1480	case SCTP_STATE_OPEN:
1481		/* incorrect state... discard */
1482		break;
1483	case SCTP_STATE_EMPTY:
1484	case SCTP_STATE_INUSE:
1485	default:
1486		/* incorrect state... discard */
1487		return (-1);
1488		break;
1489	}
1490	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1491	return (0);
1492}
1493
1494static struct sctp_tcb *
1495sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1496    struct sockaddr *src, struct sockaddr *dst,
1497    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1498    struct sctp_inpcb *inp, struct sctp_nets **netp,
1499    struct sockaddr *init_src, int *notification,
1500    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1501    uint8_t mflowtype, uint32_t mflowid,
1502    uint32_t vrf_id, uint16_t port);
1503
1504
1505/*
1506 * handle a state cookie for an existing association m: input packet mbuf
1507 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1508 * "split" mbuf and the cookie signature does not exist offset: offset into
1509 * mbuf to the cookie-echo chunk
1510 */
1511static struct sctp_tcb *
1512sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1513    struct sockaddr *src, struct sockaddr *dst,
1514    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1515    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1516    struct sockaddr *init_src, int *notification,
1517    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1518    uint8_t mflowtype, uint32_t mflowid,
1519    uint32_t vrf_id, uint16_t port)
1520{
1521	struct sctp_association *asoc;
1522	struct sctp_init_chunk *init_cp, init_buf;
1523	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1524	struct sctp_nets *net;
1525	struct mbuf *op_err;
1526	struct timeval old;
1527	int init_offset, initack_offset, i;
1528	int retval;
1529	int spec_flag = 0;
1530	uint32_t how_indx;
1531#if defined(SCTP_DETAILED_STR_STATS)
1532	int j;
1533#endif
1534
1535	net = *netp;
1536	/* I know that the TCB is non-NULL from the caller */
1537	asoc = &stcb->asoc;
1538	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1539		if (asoc->cookie_how[how_indx] == 0)
1540			break;
1541	}
1542	if (how_indx < sizeof(asoc->cookie_how)) {
1543		asoc->cookie_how[how_indx] = 1;
1544	}
1545	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1546		/* SHUTDOWN came in after sending INIT-ACK */
1547		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1548		op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1549		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1550		    mflowtype, mflowid, inp->fibnum,
1551		    vrf_id, net->port);
1552		if (how_indx < sizeof(asoc->cookie_how))
1553			asoc->cookie_how[how_indx] = 2;
1554		return (NULL);
1555	}
1556	/*
1557	 * find and validate the INIT chunk in the cookie (peer's info) the
1558	 * INIT should start after the cookie-echo header struct (chunk
1559	 * header, state cookie header struct)
1560	 */
1561	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1562
1563	init_cp = (struct sctp_init_chunk *)
1564	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1565	    (uint8_t *)&init_buf);
1566	if (init_cp == NULL) {
1567		/* could not pull a INIT chunk in cookie */
1568		return (NULL);
1569	}
1570	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1571		return (NULL);
1572	}
1573	/*
1574	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1575	 * INIT-ACK follows the INIT chunk
1576	 */
1577	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1578	initack_cp = (struct sctp_init_ack_chunk *)
1579	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1580	    (uint8_t *)&initack_buf);
1581	if (initack_cp == NULL) {
1582		/* could not pull INIT-ACK chunk in cookie */
1583		return (NULL);
1584	}
1585	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1586		return (NULL);
1587	}
1588	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1589	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1590		/*
1591		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1592		 * to get into the OPEN state
1593		 */
1594		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1595			/*-
1596			 * Opps, this means that we somehow generated two vtag's
1597			 * the same. I.e. we did:
1598			 *  Us               Peer
1599			 *   <---INIT(tag=a)------
1600			 *   ----INIT-ACK(tag=t)-->
1601			 *   ----INIT(tag=t)------> *1
1602			 *   <---INIT-ACK(tag=a)---
1603                         *   <----CE(tag=t)------------- *2
1604			 *
1605			 * At point *1 we should be generating a different
1606			 * tag t'. Which means we would throw away the CE and send
1607			 * ours instead. Basically this is case C (throw away side).
1608			 */
1609			if (how_indx < sizeof(asoc->cookie_how))
1610				asoc->cookie_how[how_indx] = 17;
1611			return (NULL);
1612
1613		}
1614		switch (SCTP_GET_STATE(stcb)) {
1615		case SCTP_STATE_COOKIE_WAIT:
1616		case SCTP_STATE_COOKIE_ECHOED:
1617			/*
1618			 * INIT was sent but got a COOKIE_ECHO with the
1619			 * correct tags... just accept it...but we must
1620			 * process the init so that we can make sure we have
1621			 * the right seq no's.
1622			 */
1623			/* First we must process the INIT !! */
1624			retval = sctp_process_init(init_cp, stcb);
1625			if (retval < 0) {
1626				if (how_indx < sizeof(asoc->cookie_how))
1627					asoc->cookie_how[how_indx] = 3;
1628				return (NULL);
1629			}
1630			/* we have already processed the INIT so no problem */
1631			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
1632			    stcb, net,
1633			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1634			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
1635			    stcb, net,
1636			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1637			/* update current state */
1638			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1639				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1640			else
1641				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1642
1643			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1644			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1645				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1646				    stcb->sctp_ep, stcb, asoc->primary_destination);
1647			}
1648			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1649			sctp_stop_all_cookie_timers(stcb);
1650			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1651			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1652			    (inp->sctp_socket->so_qlimit == 0)
1653			    ) {
1654#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1655				struct socket *so;
1656#endif
1657				/*
1658				 * Here is where collision would go if we
1659				 * did a connect() and instead got a
1660				 * init/init-ack/cookie done before the
1661				 * init-ack came back..
1662				 */
1663				stcb->sctp_ep->sctp_flags |=
1664				    SCTP_PCB_FLAGS_CONNECTED;
1665#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1666				so = SCTP_INP_SO(stcb->sctp_ep);
1667				atomic_add_int(&stcb->asoc.refcnt, 1);
1668				SCTP_TCB_UNLOCK(stcb);
1669				SCTP_SOCKET_LOCK(so, 1);
1670				SCTP_TCB_LOCK(stcb);
1671				atomic_add_int(&stcb->asoc.refcnt, -1);
1672				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1673					SCTP_SOCKET_UNLOCK(so, 1);
1674					return (NULL);
1675				}
1676#endif
1677				soisconnected(stcb->sctp_socket);
1678#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1679				SCTP_SOCKET_UNLOCK(so, 1);
1680#endif
1681			}
1682			/* notify upper layer */
1683			*notification = SCTP_NOTIFY_ASSOC_UP;
1684			/*
1685			 * since we did not send a HB make sure we don't
1686			 * double things
1687			 */
1688			old.tv_sec = cookie->time_entered.tv_sec;
1689			old.tv_usec = cookie->time_entered.tv_usec;
1690			net->hb_responded = 1;
1691			sctp_calculate_rto(stcb, asoc, net, &old,
1692			    SCTP_RTT_FROM_NON_DATA);
1693
1694			if (stcb->asoc.sctp_autoclose_ticks &&
1695			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1696				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1697				    inp, stcb, NULL);
1698			}
1699			break;
1700		default:
1701			/*
1702			 * we're in the OPEN state (or beyond), so peer must
1703			 * have simply lost the COOKIE-ACK
1704			 */
1705			break;
1706		}		/* end switch */
1707		sctp_stop_all_cookie_timers(stcb);
1708		/*
1709		 * We ignore the return code here.. not sure if we should
1710		 * somehow abort.. but we do have an existing asoc. This
1711		 * really should not fail.
1712		 */
1713		if (sctp_load_addresses_from_init(stcb, m,
1714		    init_offset + sizeof(struct sctp_init_chunk),
1715		    initack_offset, src, dst, init_src, stcb->asoc.port)) {
1716			if (how_indx < sizeof(asoc->cookie_how))
1717				asoc->cookie_how[how_indx] = 4;
1718			return (NULL);
1719		}
1720		/* respond with a COOKIE-ACK */
1721		sctp_toss_old_cookies(stcb, asoc);
1722		sctp_send_cookie_ack(stcb);
1723		if (how_indx < sizeof(asoc->cookie_how))
1724			asoc->cookie_how[how_indx] = 5;
1725		return (stcb);
1726	}
1727
1728	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1729	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1730	    cookie->tie_tag_my_vtag == 0 &&
1731	    cookie->tie_tag_peer_vtag == 0) {
1732		/*
1733		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1734		 */
1735		if (how_indx < sizeof(asoc->cookie_how))
1736			asoc->cookie_how[how_indx] = 6;
1737		return (NULL);
1738	}
1739	/*
1740	 * If nat support, and the below and stcb is established, send back
1741	 * a ABORT(colliding state) if we are established.
1742	 */
1743	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
1744	    (asoc->peer_supports_nat) &&
1745	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1746	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1747	    (asoc->peer_vtag == 0)))) {
1748		/*
1749		 * Special case - Peer's support nat. We may have two init's
1750		 * that we gave out the same tag on since one was not
1751		 * established.. i.e. we get INIT from host-1 behind the nat
1752		 * and we respond tag-a, we get a INIT from host-2 behind
1753		 * the nat and we get tag-a again. Then we bring up host-1
1754		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1755		 * Now we have colliding state. We must send an abort here
1756		 * with colliding state indication.
1757		 */
1758		op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1759		sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
1760		    mflowtype, mflowid, inp->fibnum,
1761		    vrf_id, port);
1762		return (NULL);
1763	}
1764	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1765	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1766	    (asoc->peer_vtag == 0))) {
1767		/*
1768		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1769		 * should be ok, re-accept peer info
1770		 */
1771		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1772			/*
1773			 * Extension of case C. If we hit this, then the
1774			 * random number generator returned the same vtag
1775			 * when we first sent our INIT-ACK and when we later
1776			 * sent our INIT. The side with the seq numbers that
1777			 * are different will be the one that normnally
1778			 * would have hit case C. This in effect "extends"
1779			 * our vtags in this collision case to be 64 bits.
1780			 * The same collision could occur aka you get both
1781			 * vtag and seq number the same twice in a row.. but
1782			 * is much less likely. If it did happen then we
1783			 * would proceed through and bring up the assoc.. we
1784			 * may end up with the wrong stream setup however..
1785			 * which would be bad.. but there is no way to
1786			 * tell.. until we send on a stream that does not
1787			 * exist :-)
1788			 */
1789			if (how_indx < sizeof(asoc->cookie_how))
1790				asoc->cookie_how[how_indx] = 7;
1791
1792			return (NULL);
1793		}
1794		if (how_indx < sizeof(asoc->cookie_how))
1795			asoc->cookie_how[how_indx] = 8;
1796		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1797		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1798		sctp_stop_all_cookie_timers(stcb);
1799		/*
1800		 * since we did not send a HB make sure we don't double
1801		 * things
1802		 */
1803		net->hb_responded = 1;
1804		if (stcb->asoc.sctp_autoclose_ticks &&
1805		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1806			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1807			    NULL);
1808		}
1809		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1810		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1811
1812		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1813			/*
1814			 * Ok the peer probably discarded our data (if we
1815			 * echoed a cookie+data). So anything on the
1816			 * sent_queue should be marked for retransmit, we
1817			 * may not get something to kick us so it COULD
1818			 * still take a timeout to move these.. but it can't
1819			 * hurt to mark them.
1820			 */
1821			struct sctp_tmit_chunk *chk;
1822
1823			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1824				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1825					chk->sent = SCTP_DATAGRAM_RESEND;
1826					sctp_flight_size_decrease(chk);
1827					sctp_total_flight_decrease(stcb, chk);
1828					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1829					spec_flag++;
1830				}
1831			}
1832
1833		}
1834		/* process the INIT info (peer's info) */
1835		retval = sctp_process_init(init_cp, stcb);
1836		if (retval < 0) {
1837			if (how_indx < sizeof(asoc->cookie_how))
1838				asoc->cookie_how[how_indx] = 9;
1839			return (NULL);
1840		}
1841		if (sctp_load_addresses_from_init(stcb, m,
1842		    init_offset + sizeof(struct sctp_init_chunk),
1843		    initack_offset, src, dst, init_src, stcb->asoc.port)) {
1844			if (how_indx < sizeof(asoc->cookie_how))
1845				asoc->cookie_how[how_indx] = 10;
1846			return (NULL);
1847		}
1848		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1849		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1850			*notification = SCTP_NOTIFY_ASSOC_UP;
1851
1852			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1853			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1854			    (inp->sctp_socket->so_qlimit == 0)) {
1855#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1856				struct socket *so;
1857#endif
1858				stcb->sctp_ep->sctp_flags |=
1859				    SCTP_PCB_FLAGS_CONNECTED;
1860#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1861				so = SCTP_INP_SO(stcb->sctp_ep);
1862				atomic_add_int(&stcb->asoc.refcnt, 1);
1863				SCTP_TCB_UNLOCK(stcb);
1864				SCTP_SOCKET_LOCK(so, 1);
1865				SCTP_TCB_LOCK(stcb);
1866				atomic_add_int(&stcb->asoc.refcnt, -1);
1867				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1868					SCTP_SOCKET_UNLOCK(so, 1);
1869					return (NULL);
1870				}
1871#endif
1872				soisconnected(stcb->sctp_socket);
1873#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1874				SCTP_SOCKET_UNLOCK(so, 1);
1875#endif
1876			}
1877			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1878				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1879			else
1880				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1881			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1882		} else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
1883			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1884		} else {
1885			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1886		}
1887		SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1888		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1889			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1890			    stcb->sctp_ep, stcb, asoc->primary_destination);
1891		}
1892		sctp_stop_all_cookie_timers(stcb);
1893		sctp_toss_old_cookies(stcb, asoc);
1894		sctp_send_cookie_ack(stcb);
1895		if (spec_flag) {
1896			/*
1897			 * only if we have retrans set do we do this. What
1898			 * this call does is get only the COOKIE-ACK out and
1899			 * then when we return the normal call to
1900			 * sctp_chunk_output will get the retrans out behind
1901			 * this.
1902			 */
1903			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1904		}
1905		if (how_indx < sizeof(asoc->cookie_how))
1906			asoc->cookie_how[how_indx] = 11;
1907
1908		return (stcb);
1909	}
1910	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1911	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1912	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1913	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1914	    cookie->tie_tag_peer_vtag != 0) {
1915		struct sctpasochead *head;
1916#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1917		struct socket *so;
1918#endif
1919
1920		if (asoc->peer_supports_nat) {
1921			/*
1922			 * This is a gross gross hack. Just call the
1923			 * cookie_new code since we are allowing a duplicate
1924			 * association. I hope this works...
1925			 */
1926			return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
1927			    sh, cookie, cookie_len,
1928			    inp, netp, init_src, notification,
1929			    auth_skipped, auth_offset, auth_len,
1930			    mflowtype, mflowid,
1931			    vrf_id, port));
1932		}
1933		/*
1934		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1935		 */
1936		/* temp code */
1937		if (how_indx < sizeof(asoc->cookie_how))
1938			asoc->cookie_how[how_indx] = 12;
1939		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
1940		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1941		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1942		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1943
1944		/* notify upper layer */
1945		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1946		atomic_add_int(&stcb->asoc.refcnt, 1);
1947		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) &&
1948		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1949		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
1950			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1951		}
1952		if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
1953			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1954		} else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
1955			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1956		}
1957		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1958			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1959			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1960			    stcb->sctp_ep, stcb, asoc->primary_destination);
1961
1962		} else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
1963			/* move to OPEN state, if not in SHUTDOWN_SENT */
1964			SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1965		}
1966		asoc->pre_open_streams =
1967		    ntohs(initack_cp->init.num_outbound_streams);
1968		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1969		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1970		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1971
1972		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1973
1974		asoc->str_reset_seq_in = asoc->init_seq_number;
1975
1976		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1977		if (asoc->mapping_array) {
1978			memset(asoc->mapping_array, 0,
1979			    asoc->mapping_array_size);
1980		}
1981		if (asoc->nr_mapping_array) {
1982			memset(asoc->nr_mapping_array, 0,
1983			    asoc->mapping_array_size);
1984		}
1985		SCTP_TCB_UNLOCK(stcb);
1986#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1987		so = SCTP_INP_SO(stcb->sctp_ep);
1988		SCTP_SOCKET_LOCK(so, 1);
1989#endif
1990		SCTP_INP_INFO_WLOCK();
1991		SCTP_INP_WLOCK(stcb->sctp_ep);
1992		SCTP_TCB_LOCK(stcb);
1993		atomic_add_int(&stcb->asoc.refcnt, -1);
1994		/* send up all the data */
1995		SCTP_TCB_SEND_LOCK(stcb);
1996
1997		sctp_report_all_outbound(stcb, 0, SCTP_SO_LOCKED);
1998		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1999			stcb->asoc.strmout[i].chunks_on_queues = 0;
2000#if defined(SCTP_DETAILED_STR_STATS)
2001			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
2002				asoc->strmout[i].abandoned_sent[j] = 0;
2003				asoc->strmout[i].abandoned_unsent[j] = 0;
2004			}
2005#else
2006			asoc->strmout[i].abandoned_sent[0] = 0;
2007			asoc->strmout[i].abandoned_unsent[0] = 0;
2008#endif
2009			stcb->asoc.strmout[i].sid = i;
2010			stcb->asoc.strmout[i].next_mid_ordered = 0;
2011			stcb->asoc.strmout[i].next_mid_unordered = 0;
2012			stcb->asoc.strmout[i].last_msg_incomplete = 0;
2013		}
2014		/* process the INIT-ACK info (my info) */
2015		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2016		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2017
2018		/* pull from vtag hash */
2019		LIST_REMOVE(stcb, sctp_asocs);
2020		/* re-insert to new vtag position */
2021		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
2022		    SCTP_BASE_INFO(hashasocmark))];
2023		/*
2024		 * put it in the bucket in the vtag hash of assoc's for the
2025		 * system
2026		 */
2027		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
2028
2029		SCTP_TCB_SEND_UNLOCK(stcb);
2030		SCTP_INP_WUNLOCK(stcb->sctp_ep);
2031		SCTP_INP_INFO_WUNLOCK();
2032#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2033		SCTP_SOCKET_UNLOCK(so, 1);
2034#endif
2035		asoc->total_flight = 0;
2036		asoc->total_flight_count = 0;
2037		/* process the INIT info (peer's info) */
2038		retval = sctp_process_init(init_cp, stcb);
2039		if (retval < 0) {
2040			if (how_indx < sizeof(asoc->cookie_how))
2041				asoc->cookie_how[how_indx] = 13;
2042
2043			return (NULL);
2044		}
2045		/*
2046		 * since we did not send a HB make sure we don't double
2047		 * things
2048		 */
2049		net->hb_responded = 1;
2050
2051		if (sctp_load_addresses_from_init(stcb, m,
2052		    init_offset + sizeof(struct sctp_init_chunk),
2053		    initack_offset, src, dst, init_src, stcb->asoc.port)) {
2054			if (how_indx < sizeof(asoc->cookie_how))
2055				asoc->cookie_how[how_indx] = 14;
2056
2057			return (NULL);
2058		}
2059		/* respond with a COOKIE-ACK */
2060		sctp_stop_all_cookie_timers(stcb);
2061		sctp_toss_old_cookies(stcb, asoc);
2062		sctp_send_cookie_ack(stcb);
2063		if (how_indx < sizeof(asoc->cookie_how))
2064			asoc->cookie_how[how_indx] = 15;
2065
2066		return (stcb);
2067	}
2068	if (how_indx < sizeof(asoc->cookie_how))
2069		asoc->cookie_how[how_indx] = 16;
2070	/* all other cases... */
2071	return (NULL);
2072}
2073
2074
2075/*
2076 * handle a state cookie for a new association m: input packet mbuf chain--
2077 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
2078 * and the cookie signature does not exist offset: offset into mbuf to the
2079 * cookie-echo chunk length: length of the cookie chunk to: where the init
2080 * was from returns a new TCB
2081 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	union sctp_sockstore store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *)&init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *)&initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    ntohs(initack_cp->init.num_outbound_streams),
	    port,
	    (struct thread *)NULL,
	    SCTP_DONT_INITIALIZE_AUTH_PARAMS);
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
	asoc->scope.site_scope = cookie->site_scope;
	asoc->scope.local_scope = cookie->local_scope;
	asoc->scope.loopback_scope = cookie->loopback_scope;

	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		/*
		 * Lock-order juggle: hold a refcnt so the stcb stays valid
		 * while the TCB lock is dropped to acquire the socket lock.
		 */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/*
	 * process the INIT info (peer's info) -- only done when the caller
	 * supplied a netp; otherwise skip and treat as success
	 */
	if (netp)
		retval = sctp_process_init(init_cp, stcb);
	else
		retval = 0;
	if (retval < 0) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
	    src, dst, init_src, port)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		/* AUTH chunk must fit our scratch buffer or it is rejected */
		if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
			auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		} else {
			auth = NULL;
		}
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		memset(&store.sin, 0, sizeof(struct sockaddr_in));
		store.sin.sin_family = AF_INET;
		store.sin.sin_len = sizeof(struct sockaddr_in);
		store.sin.sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
		store.sin6.sin6_family = AF_INET6;
		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
		store.sin6.sin6_scope_id = cookie->scope_id;
		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
		break;
#endif
	default:
		/* unknown local address type in cookie: dump the assoc */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}

	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while the TCB lock was dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* record when this association became established */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp != NULL) && (*netp != NULL)) {
		struct timeval old;

		/* calculate the RTT and set the encaps port */
		old.tv_sec = cookie->time_entered.tv_sec;
		old.tv_usec = cookie->time_entered.tv_usec;
		sctp_calculate_rto(stcb, asoc, *netp, &old, SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    &store.sa, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}
2436
2437/*
2438 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
2439 * we NEED to make sure we are not already using the vtag. If so we
2440 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2441	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2442							    SCTP_BASE_INFO(hashasocmark))];
2443	LIST_FOREACH(stcb, head, sctp_asocs) {
2444	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2445		       -- SEND ABORT - TRY AGAIN --
2446		}
2447	}
2448*/
2449
2450/*
2451 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2452 * existing (non-NULL) TCB
2453 */
2454static struct mbuf *
2455sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2456    struct sockaddr *src, struct sockaddr *dst,
2457    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2458    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2459    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2460    struct sctp_tcb **locked_tcb,
2461    uint8_t mflowtype, uint32_t mflowid,
2462    uint32_t vrf_id, uint16_t port)
2463{
2464	struct sctp_state_cookie *cookie;
2465	struct sctp_tcb *l_stcb = *stcb;
2466	struct sctp_inpcb *l_inp;
2467	struct sockaddr *to;
2468	struct sctp_pcb *ep;
2469	struct mbuf *m_sig;
2470	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2471	uint8_t *sig;
2472	uint8_t cookie_ok = 0;
2473	unsigned int sig_offset, cookie_offset;
2474	unsigned int cookie_len;
2475	struct timeval now;
2476	struct timeval time_expires;
2477	int notification = 0;
2478	struct sctp_nets *netl;
2479	int had_a_existing_tcb = 0;
2480	int send_int_conf = 0;
2481#ifdef INET
2482	struct sockaddr_in sin;
2483#endif
2484#ifdef INET6
2485	struct sockaddr_in6 sin6;
2486#endif
2487
2488	SCTPDBG(SCTP_DEBUG_INPUT2,
2489	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2490
2491	if (inp_p == NULL) {
2492		return (NULL);
2493	}
2494	cookie = &cp->cookie;
2495	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2496	cookie_len = ntohs(cp->ch.chunk_length);
2497
2498	if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2499	    sizeof(struct sctp_init_chunk) +
2500	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2501		/* cookie too small */
2502		return (NULL);
2503	}
2504	if ((cookie->peerport != sh->src_port) ||
2505	    (cookie->myport != sh->dest_port) ||
2506	    (cookie->my_vtag != sh->v_tag)) {
2507		/*
2508		 * invalid ports or bad tag.  Note that we always leave the
2509		 * v_tag in the header in network order and when we stored
2510		 * it in the my_vtag slot we also left it in network order.
2511		 * This maintains the match even though it may be in the
2512		 * opposite byte order of the machine :->
2513		 */
2514		return (NULL);
2515	}
2516	/*
2517	 * split off the signature into its own mbuf (since it should not be
2518	 * calculated in the sctp_hmac_m() call).
2519	 */
2520	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2521	m_sig = m_split(m, sig_offset, M_NOWAIT);
2522	if (m_sig == NULL) {
2523		/* out of memory or ?? */
2524		return (NULL);
2525	}
2526#ifdef SCTP_MBUF_LOGGING
2527	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2528		sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
2529	}
2530#endif
2531
2532	/*
2533	 * compute the signature/digest for the cookie
2534	 */
2535	ep = &(*inp_p)->sctp_ep;
2536	l_inp = *inp_p;
2537	if (l_stcb) {
2538		SCTP_TCB_UNLOCK(l_stcb);
2539	}
2540	SCTP_INP_RLOCK(l_inp);
2541	if (l_stcb) {
2542		SCTP_TCB_LOCK(l_stcb);
2543	}
2544	/* which cookie is it? */
2545	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2546	    (ep->current_secret_number != ep->last_secret_number)) {
2547		/* it's the old cookie */
2548		(void)sctp_hmac_m(SCTP_HMAC,
2549		    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2550		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2551	} else {
2552		/* it's the current cookie */
2553		(void)sctp_hmac_m(SCTP_HMAC,
2554		    (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
2555		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2556	}
2557	/* get the signature */
2558	SCTP_INP_RUNLOCK(l_inp);
2559	sig = (uint8_t *)sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *)&tmp_sig);
2560	if (sig == NULL) {
2561		/* couldn't find signature */
2562		sctp_m_freem(m_sig);
2563		return (NULL);
2564	}
2565	/* compare the received digest with the computed digest */
2566	if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2567		/* try the old cookie? */
2568		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2569		    (ep->current_secret_number != ep->last_secret_number)) {
2570			/* compute digest with old */
2571			(void)sctp_hmac_m(SCTP_HMAC,
2572			    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2573			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2574			/* compare */
2575			if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2576				cookie_ok = 1;
2577		}
2578	} else {
2579		cookie_ok = 1;
2580	}
2581
2582	/*
2583	 * Now before we continue we must reconstruct our mbuf so that
2584	 * normal processing of any other chunks will work.
2585	 */
2586	{
2587		struct mbuf *m_at;
2588
2589		m_at = m;
2590		while (SCTP_BUF_NEXT(m_at) != NULL) {
2591			m_at = SCTP_BUF_NEXT(m_at);
2592		}
2593		SCTP_BUF_NEXT(m_at) = m_sig;
2594	}
2595
2596	if (cookie_ok == 0) {
2597		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2598		SCTPDBG(SCTP_DEBUG_INPUT2,
2599		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2600		    (uint32_t)offset, cookie_offset, sig_offset);
2601		return (NULL);
2602	}
2603
2604	/*
2605	 * check the cookie timestamps to be sure it's not stale
2606	 */
2607	(void)SCTP_GETTIME_TIMEVAL(&now);
2608	/* Expire time is in Ticks, so we convert to seconds */
2609	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2610	time_expires.tv_usec = cookie->time_entered.tv_usec;
2611	if (timevalcmp(&now, &time_expires, >)) {
2612		/* cookie is stale! */
2613		struct mbuf *op_err;
2614		struct sctp_error_stale_cookie *cause;
2615		struct timeval diff;
2616		uint32_t staleness;
2617
2618		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
2619		    0, M_NOWAIT, 1, MT_DATA);
2620		if (op_err == NULL) {
2621			/* FOOBAR */
2622			return (NULL);
2623		}
2624		/* Set the len */
2625		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
2626		cause = mtod(op_err, struct sctp_error_stale_cookie *);
2627		cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
2628		cause->cause.length = htons((sizeof(struct sctp_paramhdr) +
2629		    (sizeof(uint32_t))));
2630		diff = now;
2631		timevalsub(&diff, &time_expires);
2632		if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) {
2633			staleness = UINT32_MAX;
2634		} else {
2635			staleness = diff.tv_sec * 1000000;
2636		}
2637		if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) {
2638			staleness += diff.tv_usec;
2639		} else {
2640			staleness = UINT32_MAX;
2641		}
2642		cause->stale_time = htonl(staleness);
2643		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2644		    mflowtype, mflowid, l_inp->fibnum,
2645		    vrf_id, port);
2646		return (NULL);
2647	}
2648	/*
2649	 * Now we must see with the lookup address if we have an existing
2650	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2651	 * and a INIT collided with us and somewhere the peer sent the
2652	 * cookie on another address besides the single address our assoc
2653	 * had for him. In this case we will have one of the tie-tags set at
2654	 * least AND the address field in the cookie can be used to look it
2655	 * up.
2656	 */
2657	to = NULL;
2658	switch (cookie->addr_type) {
2659#ifdef INET6
2660	case SCTP_IPV6_ADDRESS:
2661		memset(&sin6, 0, sizeof(sin6));
2662		sin6.sin6_family = AF_INET6;
2663		sin6.sin6_len = sizeof(sin6);
2664		sin6.sin6_port = sh->src_port;
2665		sin6.sin6_scope_id = cookie->scope_id;
2666		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2667		    sizeof(sin6.sin6_addr.s6_addr));
2668		to = (struct sockaddr *)&sin6;
2669		break;
2670#endif
2671#ifdef INET
2672	case SCTP_IPV4_ADDRESS:
2673		memset(&sin, 0, sizeof(sin));
2674		sin.sin_family = AF_INET;
2675		sin.sin_len = sizeof(sin);
2676		sin.sin_port = sh->src_port;
2677		sin.sin_addr.s_addr = cookie->address[0];
2678		to = (struct sockaddr *)&sin;
2679		break;
2680#endif
2681	default:
2682		/* This should not happen */
2683		return (NULL);
2684	}
2685	if (*stcb == NULL) {
2686		/* Yep, lets check */
2687		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2688		if (*stcb == NULL) {
2689			/*
2690			 * We should have only got back the same inp. If we
2691			 * got back a different ep we have a problem. The
2692			 * original findep got back l_inp and now
2693			 */
2694			if (l_inp != *inp_p) {
2695				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2696			}
2697		} else {
2698			if (*locked_tcb == NULL) {
2699				/*
2700				 * In this case we found the assoc only
2701				 * after we locked the create lock. This
2702				 * means we are in a colliding case and we
2703				 * must make sure that we unlock the tcb if
2704				 * its one of the cases where we throw away
2705				 * the incoming packets.
2706				 */
2707				*locked_tcb = *stcb;
2708
2709				/*
2710				 * We must also increment the inp ref count
2711				 * since the ref_count flags was set when we
2712				 * did not find the TCB, now we found it
2713				 * which reduces the refcount.. we must
2714				 * raise it back out to balance it all :-)
2715				 */
2716				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2717				if ((*stcb)->sctp_ep != l_inp) {
2718					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2719					    (void *)(*stcb)->sctp_ep, (void *)l_inp);
2720				}
2721			}
2722		}
2723	}
2724
2725	cookie_len -= SCTP_SIGNATURE_SIZE;
2726	if (*stcb == NULL) {
2727		/* this is the "normal" case... get a new TCB */
2728		*stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2729		    cookie, cookie_len, *inp_p,
2730		    netp, to, &notification,
2731		    auth_skipped, auth_offset, auth_len,
2732		    mflowtype, mflowid,
2733		    vrf_id, port);
2734	} else {
2735		/* this is abnormal... cookie-echo on existing TCB */
2736		had_a_existing_tcb = 1;
2737		*stcb = sctp_process_cookie_existing(m, iphlen, offset,
2738		    src, dst, sh,
2739		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2740		    &notification, auth_skipped, auth_offset, auth_len,
2741		    mflowtype, mflowid,
2742		    vrf_id, port);
2743	}
2744
2745	if (*stcb == NULL) {
2746		/* still no TCB... must be bad cookie-echo */
2747		return (NULL);
2748	}
2749	if (*netp != NULL) {
2750		(*netp)->flowtype = mflowtype;
2751		(*netp)->flowid = mflowid;
2752	}
2753	/*
2754	 * Ok, we built an association so confirm the address we sent the
2755	 * INIT-ACK to.
2756	 */
2757	netl = sctp_findnet(*stcb, to);
2758	/*
2759	 * This code should in theory NOT run but
2760	 */
2761	if (netl == NULL) {
2762		/* TSNH! Huh, why do I need to add this address here? */
2763		if (sctp_add_remote_addr(*stcb, to, NULL, port,
2764		    SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2765			return (NULL);
2766		}
2767		netl = sctp_findnet(*stcb, to);
2768	}
2769	if (netl) {
2770		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2771			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2772			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2773			    netl);
2774			send_int_conf = 1;
2775		}
2776	}
2777	sctp_start_net_timers(*stcb);
2778	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2779		if (!had_a_existing_tcb ||
2780		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2781			/*
2782			 * If we have a NEW cookie or the connect never
2783			 * reached the connected state during collision we
2784			 * must do the TCP accept thing.
2785			 */
2786			struct socket *so, *oso;
2787			struct sctp_inpcb *inp;
2788
2789			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2790				/*
2791				 * For a restart we will keep the same
2792				 * socket, no need to do anything. I THINK!!
2793				 */
2794				sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2795				if (send_int_conf) {
2796					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2797					    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2798				}
2799				return (m);
2800			}
2801			oso = (*inp_p)->sctp_socket;
2802			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2803			SCTP_TCB_UNLOCK((*stcb));
2804			CURVNET_SET(oso->so_vnet);
2805			so = sonewconn(oso, 0
2806			    );
2807			CURVNET_RESTORE();
2808			SCTP_TCB_LOCK((*stcb));
2809			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2810
2811			if (so == NULL) {
2812				struct mbuf *op_err;
2813#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2814				struct socket *pcb_so;
2815#endif
2816				/* Too many sockets */
2817				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2818				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2819				sctp_abort_association(*inp_p, NULL, m, iphlen,
2820				    src, dst, sh, op_err,
2821				    mflowtype, mflowid,
2822				    vrf_id, port);
2823#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2824				pcb_so = SCTP_INP_SO(*inp_p);
2825				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2826				SCTP_TCB_UNLOCK((*stcb));
2827				SCTP_SOCKET_LOCK(pcb_so, 1);
2828				SCTP_TCB_LOCK((*stcb));
2829				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2830#endif
2831				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
2832				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2833#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2834				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2835#endif
2836				return (NULL);
2837			}
2838			inp = (struct sctp_inpcb *)so->so_pcb;
2839			SCTP_INP_INCR_REF(inp);
2840			/*
2841			 * We add the unbound flag here so that if we get an
2842			 * soabort() before we get the move_pcb done, we
2843			 * will properly cleanup.
2844			 */
2845			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2846			    SCTP_PCB_FLAGS_CONNECTED |
2847			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2848			    SCTP_PCB_FLAGS_UNBOUND |
2849			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2850			    SCTP_PCB_FLAGS_DONT_WAKE);
2851			inp->sctp_features = (*inp_p)->sctp_features;
2852			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2853			inp->sctp_socket = so;
2854			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2855			inp->max_cwnd = (*inp_p)->max_cwnd;
2856			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2857			inp->ecn_supported = (*inp_p)->ecn_supported;
2858			inp->prsctp_supported = (*inp_p)->prsctp_supported;
2859			inp->auth_supported = (*inp_p)->auth_supported;
2860			inp->asconf_supported = (*inp_p)->asconf_supported;
2861			inp->reconfig_supported = (*inp_p)->reconfig_supported;
2862			inp->nrsack_supported = (*inp_p)->nrsack_supported;
2863			inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
2864			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2865			inp->sctp_context = (*inp_p)->sctp_context;
2866			inp->local_strreset_support = (*inp_p)->local_strreset_support;
2867			inp->fibnum = (*inp_p)->fibnum;
2868			inp->inp_starting_point_for_iterator = NULL;
2869			/*
2870			 * copy in the authentication parameters from the
2871			 * original endpoint
2872			 */
2873			if (inp->sctp_ep.local_hmacs)
2874				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2875			inp->sctp_ep.local_hmacs =
2876			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2877			if (inp->sctp_ep.local_auth_chunks)
2878				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2879			inp->sctp_ep.local_auth_chunks =
2880			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2881
2882			/*
2883			 * Now we must move it from one hash table to
2884			 * another and get the tcb in the right place.
2885			 */
2886
2887			/*
2888			 * This is where the one-2-one socket is put into
2889			 * the accept state waiting for the accept!
2890			 */
2891			if (*stcb) {
2892				SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
2893			}
2894			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2895
2896			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2897			SCTP_TCB_UNLOCK((*stcb));
2898
2899			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2900			    0);
2901			SCTP_TCB_LOCK((*stcb));
2902			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2903
2904
2905			/*
2906			 * now we must check to see if we were aborted while
2907			 * the move was going on and the lock/unlock
2908			 * happened.
2909			 */
2910			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2911				/*
2912				 * yep it was, we leave the assoc attached
2913				 * to the socket since the sctp_inpcb_free()
2914				 * call will send an abort for us.
2915				 */
2916				SCTP_INP_DECR_REF(inp);
2917				return (NULL);
2918			}
2919			SCTP_INP_DECR_REF(inp);
2920			/* Switch over to the new guy */
2921			*inp_p = inp;
2922			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2923			if (send_int_conf) {
2924				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2925				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2926			}
2927
2928			/*
2929			 * Pull it from the incomplete queue and wake the
2930			 * guy
2931			 */
2932#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2933			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2934			SCTP_TCB_UNLOCK((*stcb));
2935			SCTP_SOCKET_LOCK(so, 1);
2936#endif
2937			soisconnected(so);
2938#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2939			SCTP_TCB_LOCK((*stcb));
2940			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2941			SCTP_SOCKET_UNLOCK(so, 1);
2942#endif
2943			return (m);
2944		}
2945	}
2946	if (notification) {
2947		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2948	}
2949	if (send_int_conf) {
2950		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2951		    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2952	}
2953	return (m);
2954}
2955
/*
 * Handle a received COOKIE-ACK chunk: move the association from
 * COOKIE-ECHOED to OPEN, notify the ULP, start the relevant timers,
 * discard the cached cookie, and restart the data timer if there is
 * anything left on the sent queue.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if ((stcb == NULL) || (net == NULL)) {
		return;
	}

	asoc = &stcb->asoc;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    asoc->overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* The peer is alive; clear the error counter and cookie timers. */
	asoc->overall_error_count = 0;
	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		/*
		 * NOTE(review): overall_error_count was zeroed above, so
		 * this condition is always true at this point — confirm
		 * whether the check was meant to inspect a pre-reset value.
		 */
		if (asoc->overall_error_count == 0) {
			sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock ordering: the socket lock must be taken
			 * without the TCB lock held, so drop the TCB lock
			 * (holding a refcount to keep the assoc alive) and
			 * reacquire it afterwards.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/*
			 * We don't need to do the asconf thing, nor hb or
			 * autoclose if the socket is closed.
			 */
			goto closed_socket;
		}

		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.asconf_supported == 1) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	/* Restart the timer if we have pending data */
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->whoTo != NULL) {
			break;
		}
	}
	if (chk != NULL) {
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
3076
/*
 * Handle a received ECN-ECHO (ECNE) chunk: locate the destination the
 * echoed TSN was sent to, reduce cwnd via the pluggable CC module at
 * most once per RTT window, and always reply with a CWR so the peer
 * stops echoing.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	/* Accept both the current and the legacy (shorter) ECNE format. */
	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/*
	 * window_data_tsn is the highest TSN queued so far; cwnd will not
	 * be reduced again until sending passes this point (once per RTT).
	 */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.tsn;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.tsn == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		/* Queue is TSN-ordered; past the TSN means it is gone. */
		if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a CWR was possibly lost.
		 * See how old it is, we may have it marked on the actual
		 * net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special CWR
			 * that says hey, we did this a long time ago and
			 * you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how many
			 * marks/packets lost we have had.
			 */
			int cnt = 1;

			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are in-window yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
3199
3200static void
3201sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3202{
3203	/*
3204	 * Here we get a CWR from the peer. We must look in the outqueue and
3205	 * make sure that we have a covered ECNE in the control chunk part.
3206	 * If so remove it.
3207	 */
3208	struct sctp_tmit_chunk *chk, *nchk;
3209	struct sctp_ecne_chunk *ecne;
3210	int override;
3211	uint32_t cwr_tsn;
3212
3213	cwr_tsn = ntohl(cp->tsn);
3214	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3215	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) {
3216		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3217			continue;
3218		}
3219		if ((override == 0) && (chk->whoTo != net)) {
3220			/* Must be from the right src unless override is set */
3221			continue;
3222		}
3223		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3224		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3225			/* this covers this ECNE, we can remove it */
3226			stcb->asoc.ecn_echo_cnt_onq--;
3227			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3228			    sctp_next);
3229			stcb->asoc.ctrl_queue_cnt--;
3230			sctp_m_freem(chk->data);
3231			chk->data = NULL;
3232			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3233			if (override == 0) {
3234				break;
3235			}
3236		}
3237	}
3238}
3239
/*
 * Handle a received SHUTDOWN-COMPLETE chunk: valid only in the
 * SHUTDOWN-ACK-SENT state, where it finishes the shutdown handshake by
 * notifying the ULP, stopping the SHUTDOWN-ACK timer, and freeing the
 * association. On return the TCB lock has been released (either here or
 * inside sctp_free_assoc()).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	/* process according to association state */
	if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
#ifdef INVARIANTS
	/* All data must have been acked before SHUTDOWN-COMPLETE. */
	if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
	    !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: acquire the socket lock before freeing the assoc;
	 * the refcount keeps the TCB alive while the TCB lock is dropped.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3294
/*
 * Process one chunk descriptor from a PACKET-DROPPED report: depending
 * on the dropped chunk's type, mark the corresponding queued chunk for
 * retransmission or resend it directly.
 *
 * Returns 0 on success (or when the drop is deliberately ignored) and
 * -1 when the reported data bytes do not match our queued copy.
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/* First pass relies on the sent queue's TSN order. */
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.tsn == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.tsn == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/*
				 * Only act on endpoint reports with a bad
				 * CRC, or on any middle-box report.
				 */
				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					return (0);
				}
				/*
				 * With a closed receive window, skip the
				 * retransmit; count endpoint and middle-box
				 * cases separately.
				 */
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reported payload bytes match
				 * our queued copy before trusting the
				 * report.
				 */
				ddp = (uint8_t *)(mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo,
				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uint32_t)(uintptr_t)stcb,
					    tp1->rec.data.tsn);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/*
				 * audit code: recount the RESEND-marked
				 * chunks and repair sent_queue_retran_cnt
				 * if it disagrees. (This block runs
				 * unconditionally after the if above.)
				 */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			/* Mark the first queued ASCONF for retransmit. */
			struct sctp_tmit_chunk *asconf;

			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			/* Mark the queued COOKIE-ECHO for retransmit. */
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_IFORWARD_CUM_TSN:
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3535
3536void
3537sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3538{
3539	uint32_t i;
3540	uint16_t temp;
3541
3542	/*
3543	 * We set things to 0xffffffff since this is the last delivered
3544	 * sequence and we will be sending in 0 after the reset.
3545	 */
3546
3547	if (number_entries) {
3548		for (i = 0; i < number_entries; i++) {
3549			temp = ntohs(list[i]);
3550			if (temp >= stcb->asoc.streamincnt) {
3551				continue;
3552			}
3553			stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
3554		}
3555	} else {
3556		list = NULL;
3557		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3558			stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
3559		}
3560	}
3561	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3562}
3563
3564static void
3565sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3566{
3567	uint32_t i;
3568	uint16_t temp;
3569
3570	if (number_entries > 0) {
3571		for (i = 0; i < number_entries; i++) {
3572			temp = ntohs(list[i]);
3573			if (temp >= stcb->asoc.streamoutcnt) {
3574				/* no such stream */
3575				continue;
3576			}
3577			stcb->asoc.strmout[temp].next_mid_ordered = 0;
3578			stcb->asoc.strmout[temp].next_mid_unordered = 0;
3579		}
3580	} else {
3581		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3582			stcb->asoc.strmout[i].next_mid_ordered = 0;
3583			stcb->asoc.strmout[i].next_mid_unordered = 0;
3584		}
3585	}
3586	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3587}
3588
3589static void
3590sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3591{
3592	uint32_t i;
3593	uint16_t temp;
3594
3595	if (number_entries > 0) {
3596		for (i = 0; i < number_entries; i++) {
3597			temp = ntohs(list[i]);
3598			if (temp >= stcb->asoc.streamoutcnt) {
3599				/* no such stream */
3600				continue;
3601			}
3602			stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
3603		}
3604	} else {
3605		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3606			stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
3607		}
3608	}
3609}
3610
3611
struct sctp_stream_reset_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	/*
	 * Search the queued STREAM_RESET chunk (asoc->str_reset) for the
	 * request parameter whose request_seq equals 'seq'.  A single chunk
	 * carries at most two request parameters; both are checked.  Returns
	 * a pointer into the chunk's mbuf data, or NULL when no matching
	 * request is pending.  If 'bchk' is non-NULL, *bchk is set to the
	 * chunk carrying the request.
	 */
	struct sctp_association *asoc;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* Nothing queued at all, so nothing can be outstanding. */
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		/* No stream-reset chunk is pending. */
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	/* First request parameter sits directly after the chunk header. */
	r = (struct sctp_stream_reset_request *)(ch + 1);
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	/* Step over the first parameter (padded to a 4-byte boundary). */
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}
3656
static void
sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
{
	/*
	 * Tear down the outstanding STREAM_RESET chunk: stop its
	 * retransmission timer, unlink it from the control send queue and
	 * release the chunk and its mbuf data.  No-op when nothing is
	 * pending.
	 */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	chk = asoc->str_reset;
	if (chk == NULL) {
		return;
	}
	/* Clear the reference first so nobody else finds the dying chunk. */
	asoc->str_reset = NULL;
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
	    chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
	TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt--;
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
3679
3680
3681static int
3682sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3683    uint32_t seq, uint32_t action,
3684    struct sctp_stream_reset_response *respin)
3685{
3686	uint16_t type;
3687	int lparam_len;
3688	struct sctp_association *asoc = &stcb->asoc;
3689	struct sctp_tmit_chunk *chk;
3690	struct sctp_stream_reset_request *req_param;
3691	struct sctp_stream_reset_out_request *req_out_param;
3692	struct sctp_stream_reset_in_request *req_in_param;
3693	uint32_t number_entries;
3694
3695	if (asoc->stream_reset_outstanding == 0) {
3696		/* duplicate */
3697		return (0);
3698	}
3699	if (seq == stcb->asoc.str_reset_seq_out) {
3700		req_param = sctp_find_stream_reset(stcb, seq, &chk);
3701		if (req_param != NULL) {
3702			stcb->asoc.str_reset_seq_out++;
3703			type = ntohs(req_param->ph.param_type);
3704			lparam_len = ntohs(req_param->ph.param_length);
3705			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3706				int no_clear = 0;
3707
3708				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
3709				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3710				asoc->stream_reset_out_is_outstanding = 0;
3711				if (asoc->stream_reset_outstanding)
3712					asoc->stream_reset_outstanding--;
3713				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3714					/* do it */
3715					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
3716				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3717					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3718				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
3719					/*
3720					 * Set it up so we don't stop
3721					 * retransmitting
3722					 */
3723					asoc->stream_reset_outstanding++;
3724					stcb->asoc.str_reset_seq_out--;
3725					asoc->stream_reset_out_is_outstanding = 1;
3726					no_clear = 1;
3727				} else {
3728					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3729				}
3730				if (no_clear == 0) {
3731					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
3732				}
3733			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3734				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
3735				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3736				if (asoc->stream_reset_outstanding)
3737					asoc->stream_reset_outstanding--;
3738				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3739					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
3740					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3741				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3742					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
3743					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3744				}
3745			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
3746				/* Ok we now may have more streams */
3747				int num_stream;
3748
3749				num_stream = stcb->asoc.strm_pending_add_size;
3750				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
3751					/* TSNH */
3752					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
3753				}
3754				stcb->asoc.strm_pending_add_size = 0;
3755				if (asoc->stream_reset_outstanding)
3756					asoc->stream_reset_outstanding--;
3757				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3758					/* Put the new streams into effect */
3759					int i;
3760
3761					for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) {
3762						asoc->strmout[i].state = SCTP_STREAM_OPEN;
3763					}
3764					asoc->streamoutcnt += num_stream;
3765					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
3766				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3767					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3768					    SCTP_STREAM_CHANGE_DENIED);
3769				} else {
3770					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3771					    SCTP_STREAM_CHANGE_FAILED);
3772				}
3773			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
3774				if (asoc->stream_reset_outstanding)
3775					asoc->stream_reset_outstanding--;
3776				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3777					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3778					    SCTP_STREAM_CHANGE_DENIED);
3779				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3780					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3781					    SCTP_STREAM_CHANGE_FAILED);
3782				}
3783			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3784				/**
3785				 * a) Adopt the new in tsn.
3786				 * b) reset the map
3787				 * c) Adopt the new out-tsn
3788				 */
3789				struct sctp_stream_reset_response_tsn *resp;
3790				struct sctp_forward_tsn_chunk fwdtsn;
3791				int abort_flag = 0;
3792
3793				if (respin == NULL) {
3794					/* huh ? */
3795					return (0);
3796				}
3797				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
3798					return (0);
3799				}
3800				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3801					resp = (struct sctp_stream_reset_response_tsn *)respin;
3802					asoc->stream_reset_outstanding--;
3803					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3804					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3805					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3806					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3807					if (abort_flag) {
3808						return (1);
3809					}
3810					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3811					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3812						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3813					}
3814
3815					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3816					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3817					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3818
3819					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
3820					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
3821
3822					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3823					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3824
3825					sctp_reset_out_streams(stcb, 0, (uint16_t *)NULL);
3826					sctp_reset_in_stream(stcb, 0, (uint16_t *)NULL);
3827					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
3828				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3829					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3830					    SCTP_ASSOC_RESET_DENIED);
3831				} else {
3832					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
3833					    SCTP_ASSOC_RESET_FAILED);
3834				}
3835			}
3836			/* get rid of the request and get the request flags */
3837			if (asoc->stream_reset_outstanding == 0) {
3838				sctp_clean_up_stream_reset(stcb);
3839			}
3840		}
3841	}
3842	if (asoc->stream_reset_outstanding == 0) {
3843		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
3844	}
3845	return (0);
3846}
3847
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* New request: shift the previous result into history slot 1. */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* Feature not enabled locally. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Can't do it, since they exceeded our buffer size  */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			if (number_entries) {
				for (i = 0; i < number_entries; i++) {
					temp = ntohs(req->list_of_streams[i]);
					if (temp >= stcb->asoc.streamoutcnt) {
						/* Unknown stream id: reject the whole request. */
						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
						goto bad_boy;
					}
					/* Store back in host byte order; used as an index below. */
					req->list_of_streams[i] = temp;
				}
				for (i = 0; i < number_entries; i++) {
					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
					}
				}
			} else {
				/* Its all */
				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
				}
			}
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		}
bad_boy:
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission of the previous request: echo the saved result. */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Two behind: echo the older saved result. */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* Out of the acceptable window. */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
}
3913
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	/* Returns 1 if the association aborted during FORWARD-TSN handling. */
	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* New request: shift the previous result into history slot 1. */
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/*
			 * Build a synthetic FORWARD-TSN to flush everything
			 * up to the current highest TSN in the map.
			 */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				return (1);
			}
			/* Jump the receive window and rebase the mapping arrays. */
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			sctp_reset_out_streams(stcb, 0, (uint16_t *)NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *)NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission: echo the saved result and TSN snapshot. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Two behind: echo the older saved result and snapshot. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		/* Out of the acceptable window. */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}
3978
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	/*
	 * Peer is resetting its outgoing (our incoming) streams.  Either
	 * perform the reset now (if we have everything up to the given
	 * TSN) or queue it until the remaining TSNs arrive.  The result is
	 * appended to the response chunk 'chk'.
	 */
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* Feature not enabled locally. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Request exceeded our buffer size. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			/* Save the request; it fires once cumulative_tsn reaches 'tsn'. */
			liste->seq = seq;
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* Out of the acceptable window. */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
4055
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 *
	 * This grows our INCOMING stream array (asoc->strmin): the array is
	 * reallocated, existing per-stream state and queued data are
	 * migrated, and the new streams are initialized empty.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it they ask for to many */
	denied:
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* Allocation failed: keep the old array and deny. */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
				}
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				/* 0xffffffff: nothing delivered yet, next MID is 0. */
				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
				stcb->asoc.strmin[i].pd_api_started = 0;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* Out of the acceptable window. */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}
4148
static void
sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 *
	 * Here the peer asks us to add OUTGOING streams on our side; if
	 * acceptable we issue our own add-out-streams request via
	 * sctp_send_str_reset_req() rather than growing anything directly.
	 */
	uint16_t num_stream;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_outstanding) {
			/* We must reject it we have something pending */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		} else {
			/* Ok, we can do that :-) */
			int mychk;

			/* New total must still fit in a 16-bit stream count. */
			mychk = stcb->asoc.streamoutcnt;
			mychk += num_stream;
			if (mychk < 0x10000) {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				}
			} else {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
			}
		}
		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* Out of the acceptable window. */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
4205
#ifdef __GNUC__
__attribute__((noinline))
#endif
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_chunkhdr *ch_req)
{
	/*
	 * Top-level handler for an incoming STREAM_RESET chunk: walk its
	 * parameters, dispatch each request type to the matching handler
	 * (which appends its result to a freshly allocated response chunk),
	 * and queue the response.  Returns 1 if the association was aborted
	 * while processing, 0 otherwise.
	 */
	uint16_t remaining_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/* Common failure/abort exit: release chunk resources. */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* Walk all parameters carried by the incoming chunk. */
	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
		/* Peek just the parameter header first to learn its length. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		param_len = ntohs(ph->param_length);
		if ((param_len > remaining_length) ||
		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
			/* bad parameter length */
			break;
		}
		/* Now pull the parameter body (clamped to our buffer size). */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
		    (uint8_t *)&cstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > sizeof(cstore)) {
			/* Parameter was truncated to fit cstore. */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
				break;
			}
			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			/* Peer wants us to add outgoing streams on our side. */
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* Association aborted while processing. */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			if (param_len < sizeof(struct sctp_stream_reset_response)) {
				break;
			}
			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				/* Association aborted while processing. */
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* Unknown parameter type: stop processing. */
			break;
		}
		/* Advance past this parameter, including its padding. */
		offset += SCTP_SIZE32(param_len);
		if (remaining_length >= SCTP_SIZE32(param_len)) {
			remaining_length -= SCTP_SIZE32(param_len);
		} else {
			remaining_length = 0;
		}
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4379
4380/*
4381 * Handle a router or endpoints report of a packet loss, there are two ways
4382 * to handle this, either we get the whole packet and must disect it
4383 * ourselves (possibly with truncation and or corruption) or it is a summary
4384 * from a middle box that did the disectting for us.
4385 */
4386static void
4387sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4388    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4389{
4390	uint32_t bottle_bw, on_queue;
4391	uint16_t trunc_len;
4392	unsigned int chlen;
4393	unsigned int at;
4394	struct sctp_chunk_desc desc;
4395	struct sctp_chunkhdr *ch;
4396
4397	chlen = ntohs(cp->ch.chunk_length);
4398	chlen -= sizeof(struct sctp_pktdrop_chunk);
4399	/* XXX possible chlen underflow */
4400	if (chlen == 0) {
4401		ch = NULL;
4402		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4403			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4404	} else {
4405		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4406		chlen -= sizeof(struct sctphdr);
4407		/* XXX possible chlen underflow */
4408		memset(&desc, 0, sizeof(desc));
4409	}
4410	trunc_len = (uint16_t)ntohs(cp->trunc_len);
4411	if (trunc_len > limit) {
4412		trunc_len = limit;
4413	}
4414
4415	/* now the chunks themselves */
4416	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4417		desc.chunk_type = ch->chunk_type;
4418		/* get amount we need to move */
4419		at = ntohs(ch->chunk_length);
4420		if (at < sizeof(struct sctp_chunkhdr)) {
4421			/* corrupt chunk, maybe at the end? */
4422			SCTP_STAT_INCR(sctps_pdrpcrupt);
4423			break;
4424		}
4425		if (trunc_len == 0) {
4426			/* we are supposed to have all of it */
4427			if (at > chlen) {
4428				/* corrupt skip it */
4429				SCTP_STAT_INCR(sctps_pdrpcrupt);
4430				break;
4431			}
4432		} else {
4433			/* is there enough of it left ? */
4434			if (desc.chunk_type == SCTP_DATA) {
4435				if (chlen < (sizeof(struct sctp_data_chunk) +
4436				    sizeof(desc.data_bytes))) {
4437					break;
4438				}
4439			} else {
4440				if (chlen < sizeof(struct sctp_chunkhdr)) {
4441					break;
4442				}
4443			}
4444		}
4445		if (desc.chunk_type == SCTP_DATA) {
4446			/* can we get out the tsn? */
4447			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4448				SCTP_STAT_INCR(sctps_pdrpmbda);
4449
4450			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4451				/* yep */
4452				struct sctp_data_chunk *dcp;
4453				uint8_t *ddp;
4454				unsigned int iii;
4455
4456				dcp = (struct sctp_data_chunk *)ch;
4457				ddp = (uint8_t *)(dcp + 1);
4458				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4459					desc.data_bytes[iii] = ddp[iii];
4460				}
4461				desc.tsn_ifany = dcp->dp.tsn;
4462			} else {
4463				/* nope we are done. */
4464				SCTP_STAT_INCR(sctps_pdrpnedat);
4465				break;
4466			}
4467		} else {
4468			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4469				SCTP_STAT_INCR(sctps_pdrpmbct);
4470		}
4471
4472		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4473			SCTP_STAT_INCR(sctps_pdrppdbrk);
4474			break;
4475		}
4476		if (SCTP_SIZE32(at) > chlen) {
4477			break;
4478		}
4479		chlen -= SCTP_SIZE32(at);
4480		if (chlen < sizeof(struct sctp_chunkhdr)) {
4481			/* done, none left */
4482			break;
4483		}
4484		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4485	}
4486	/* Now update any rwnd --- possibly */
4487	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4488		/* From a peer, we get a rwnd report */
4489		uint32_t a_rwnd;
4490
4491		SCTP_STAT_INCR(sctps_pdrpfehos);
4492
4493		bottle_bw = ntohl(cp->bottle_bw);
4494		on_queue = ntohl(cp->current_onq);
4495		if (bottle_bw && on_queue) {
4496			/* a rwnd report is in here */
4497			if (bottle_bw > on_queue)
4498				a_rwnd = bottle_bw - on_queue;
4499			else
4500				a_rwnd = 0;
4501
4502			if (a_rwnd == 0)
4503				stcb->asoc.peers_rwnd = 0;
4504			else {
4505				if (a_rwnd > stcb->asoc.total_flight) {
4506					stcb->asoc.peers_rwnd =
4507					    a_rwnd - stcb->asoc.total_flight;
4508				} else {
4509					stcb->asoc.peers_rwnd = 0;
4510				}
4511				if (stcb->asoc.peers_rwnd <
4512				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4513					/* SWS sender side engages */
4514					stcb->asoc.peers_rwnd = 0;
4515				}
4516			}
4517		}
4518	} else {
4519		SCTP_STAT_INCR(sctps_pdrpfmbox);
4520	}
4521
4522	/* now middle boxes in sat networks get a cwnd bump */
4523	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4524	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4525	    (stcb->asoc.sat_network)) {
4526		/*
4527		 * This is debatable but for sat networks it makes sense
4528		 * Note if a T3 timer has went off, we will prohibit any
4529		 * changes to cwnd until we exit the t3 loss recovery.
4530		 */
4531		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4532		    net, cp, &bottle_bw, &on_queue);
4533	}
4534}
4535
4536/*
4537 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4538 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4539 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4540 * length of the complete packet outputs: - length: modified to remaining
4541 * length after control processing - netp: modified to new sctp_nets after
4542 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4543 * bad packet,...) otherwise return the tcb for this packet
4544 */
4545#ifdef __GNUC__
4546__attribute__((noinline))
4547#endif
4548static struct sctp_tcb *
4549sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4550    struct sockaddr *src, struct sockaddr *dst,
4551    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4552    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4553    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4554    uint32_t vrf_id, uint16_t port)
4555{
4556	struct sctp_association *asoc;
4557	struct mbuf *op_err;
4558	char msg[SCTP_DIAG_INFO_LEN];
4559	uint32_t vtag_in;
4560	int num_chunks = 0;	/* number of control chunks processed */
4561	uint32_t chk_length, contiguous;
4562	int ret;
4563	int abort_no_unlock = 0;
4564	int ecne_seen = 0;
4565
4566	/*
4567	 * How big should this be, and should it be alloc'd? Lets try the
4568	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4569	 * until we get into jumbo grams and such..
4570	 */
4571	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4572	int got_auth = 0;
4573	uint32_t auth_offset = 0, auth_len = 0;
4574	int auth_skipped = 0;
4575	int asconf_cnt = 0;
4576#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4577	struct socket *so;
4578#endif
4579
4580	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4581	    iphlen, *offset, length, (void *)stcb);
4582
4583	if (stcb) {
4584		SCTP_TCB_LOCK_ASSERT(stcb);
4585	}
4586	/* validate chunk header length... */
4587	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4588		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4589		    ntohs(ch->chunk_length));
4590		*offset = length;
4591		return (stcb);
4592	}
4593	/*
4594	 * validate the verification tag
4595	 */
4596	vtag_in = ntohl(sh->v_tag);
4597
4598	if (ch->chunk_type == SCTP_INITIATION) {
4599		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4600		    ntohs(ch->chunk_length), vtag_in);
4601		if (vtag_in != 0) {
4602			/* protocol error- silently discard... */
4603			SCTP_STAT_INCR(sctps_badvtag);
4604			if (stcb != NULL) {
4605				SCTP_TCB_UNLOCK(stcb);
4606			}
4607			return (NULL);
4608		}
4609	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4610		/*
4611		 * If there is no stcb, skip the AUTH chunk and process
4612		 * later after a stcb is found (to validate the lookup was
4613		 * valid.
4614		 */
4615		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4616		    (stcb == NULL) &&
4617		    (inp->auth_supported == 1)) {
4618			/* save this chunk for later processing */
4619			auth_skipped = 1;
4620			auth_offset = *offset;
4621			auth_len = ntohs(ch->chunk_length);
4622
4623			/* (temporarily) move past this chunk */
4624			*offset += SCTP_SIZE32(auth_len);
4625			if (*offset >= length) {
4626				/* no more data left in the mbuf chain */
4627				*offset = length;
4628				return (NULL);
4629			}
4630			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4631			    sizeof(struct sctp_chunkhdr), chunk_buf);
4632		}
4633		if (ch == NULL) {
4634			/* Help */
4635			*offset = length;
4636			return (stcb);
4637		}
4638		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4639			goto process_control_chunks;
4640		}
4641		/*
4642		 * first check if it's an ASCONF with an unknown src addr we
4643		 * need to look inside to find the association
4644		 */
4645		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4646			struct sctp_chunkhdr *asconf_ch = ch;
4647			uint32_t asconf_offset = 0, asconf_len = 0;
4648
4649			/* inp's refcount may be reduced */
4650			SCTP_INP_INCR_REF(inp);
4651
4652			asconf_offset = *offset;
4653			do {
4654				asconf_len = ntohs(asconf_ch->chunk_length);
4655				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4656					break;
4657				stcb = sctp_findassociation_ep_asconf(m,
4658				    *offset,
4659				    dst,
4660				    sh, &inp, netp, vrf_id);
4661				if (stcb != NULL)
4662					break;
4663				asconf_offset += SCTP_SIZE32(asconf_len);
4664				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4665				    sizeof(struct sctp_chunkhdr), chunk_buf);
4666			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4667			if (stcb == NULL) {
4668				/*
4669				 * reduce inp's refcount if not reduced in
4670				 * sctp_findassociation_ep_asconf().
4671				 */
4672				SCTP_INP_DECR_REF(inp);
4673			}
4674
4675			/* now go back and verify any auth chunk to be sure */
4676			if (auth_skipped && (stcb != NULL)) {
4677				struct sctp_auth_chunk *auth;
4678
4679				if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
4680					auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
4681					got_auth = 1;
4682					auth_skipped = 0;
4683				} else {
4684					auth = NULL;
4685				}
4686				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4687				    auth_offset)) {
4688					/* auth HMAC failed so dump it */
4689					*offset = length;
4690					return (stcb);
4691				} else {
4692					/* remaining chunks are HMAC checked */
4693					stcb->asoc.authenticated = 1;
4694				}
4695			}
4696		}
4697		if (stcb == NULL) {
4698			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4699			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4700			    msg);
4701			/* no association, so it's out of the blue... */
4702			sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4703			    mflowtype, mflowid, inp->fibnum,
4704			    vrf_id, port);
4705			*offset = length;
4706			return (NULL);
4707		}
4708		asoc = &stcb->asoc;
4709		/* ABORT and SHUTDOWN can use either v_tag... */
4710		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4711		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4712		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4713			/* Take the T-bit always into account. */
4714			if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4715			    (vtag_in == asoc->my_vtag)) ||
4716			    (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4717			    (asoc->peer_vtag != htonl(0)) &&
4718			    (vtag_in == asoc->peer_vtag))) {
4719				/* this is valid */
4720			} else {
4721				/* drop this packet... */
4722				SCTP_STAT_INCR(sctps_badvtag);
4723				if (stcb != NULL) {
4724					SCTP_TCB_UNLOCK(stcb);
4725				}
4726				return (NULL);
4727			}
4728		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4729			if (vtag_in != asoc->my_vtag) {
4730				/*
4731				 * this could be a stale SHUTDOWN-ACK or the
4732				 * peer never got the SHUTDOWN-COMPLETE and
4733				 * is still hung; we have started a new asoc
4734				 * but it won't complete until the shutdown
4735				 * is completed
4736				 */
4737				if (stcb != NULL) {
4738					SCTP_TCB_UNLOCK(stcb);
4739				}
4740				snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4741				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4742				    msg);
4743				sctp_handle_ootb(m, iphlen, *offset, src, dst,
4744				    sh, inp, op_err,
4745				    mflowtype, mflowid, fibnum,
4746				    vrf_id, port);
4747				return (NULL);
4748			}
4749		} else {
4750			/* for all other chunks, vtag must match */
4751			if (vtag_in != asoc->my_vtag) {
4752				/* invalid vtag... */
4753				SCTPDBG(SCTP_DEBUG_INPUT3,
4754				    "invalid vtag: %xh, expect %xh\n",
4755				    vtag_in, asoc->my_vtag);
4756				SCTP_STAT_INCR(sctps_badvtag);
4757				if (stcb != NULL) {
4758					SCTP_TCB_UNLOCK(stcb);
4759				}
4760				*offset = length;
4761				return (NULL);
4762			}
4763		}
4764	}			/* end if !SCTP_COOKIE_ECHO */
4765	/*
4766	 * process all control chunks...
4767	 */
4768	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4769	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4770	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4771	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4772		/* implied cookie-ack.. we must have lost the ack */
4773		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4774		    *netp);
4775	}
4776
4777process_control_chunks:
4778	while (IS_SCTP_CONTROL(ch)) {
4779		/* validate chunk length */
4780		chk_length = ntohs(ch->chunk_length);
4781		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4782		    ch->chunk_type, chk_length);
4783		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4784		if (chk_length < sizeof(*ch) ||
4785		    (*offset + (int)chk_length) > length) {
4786			*offset = length;
4787			return (stcb);
4788		}
4789		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4790		/*
4791		 * INIT and INIT-ACK only gets the init ack "header" portion
4792		 * only because we don't have to process the peer's COOKIE.
4793		 * All others get a complete chunk.
4794		 */
4795		switch (ch->chunk_type) {
4796		case SCTP_INITIATION:
4797			contiguous = sizeof(struct sctp_init_chunk);
4798			break;
4799		case SCTP_INITIATION_ACK:
4800			contiguous = sizeof(struct sctp_init_ack_chunk);
4801			break;
4802		default:
4803			contiguous = min(chk_length, sizeof(chunk_buf));
4804			break;
4805		}
4806		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4807		    contiguous,
4808		    chunk_buf);
4809		if (ch == NULL) {
4810			*offset = length;
4811			if (stcb != NULL) {
4812				SCTP_TCB_UNLOCK(stcb);
4813			}
4814			return (NULL);
4815		}
4816
4817		num_chunks++;
4818		/* Save off the last place we got a control from */
4819		if (stcb != NULL) {
4820			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4821				/*
4822				 * allow last_control to be NULL if
4823				 * ASCONF... ASCONF processing will find the
4824				 * right net later
4825				 */
4826				if ((netp != NULL) && (*netp != NULL))
4827					stcb->asoc.last_control_chunk_from = *netp;
4828			}
4829		}
4830#ifdef SCTP_AUDITING_ENABLED
4831		sctp_audit_log(0xB0, ch->chunk_type);
4832#endif
4833
4834		/* check to see if this chunk required auth, but isn't */
4835		if ((stcb != NULL) &&
4836		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4837		    !stcb->asoc.authenticated) {
4838			/* "silently" ignore */
4839			SCTP_STAT_INCR(sctps_recvauthmissing);
4840			goto next_chunk;
4841		}
4842		switch (ch->chunk_type) {
4843		case SCTP_INITIATION:
4844			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4845			/* The INIT chunk must be the only chunk. */
4846			if ((num_chunks > 1) ||
4847			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4848				/* RFC 4960 requires that no ABORT is sent */
4849				*offset = length;
4850				if (stcb != NULL) {
4851					SCTP_TCB_UNLOCK(stcb);
4852				}
4853				return (NULL);
4854			}
4855			/* Honor our resource limit. */
4856			if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
4857				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
4858				sctp_abort_association(inp, stcb, m, iphlen,
4859				    src, dst, sh, op_err,
4860				    mflowtype, mflowid,
4861				    vrf_id, port);
4862				*offset = length;
4863				return (NULL);
4864			}
4865			sctp_handle_init(m, iphlen, *offset, src, dst, sh,
4866			    (struct sctp_init_chunk *)ch, inp,
4867			    stcb, *netp, &abort_no_unlock,
4868			    mflowtype, mflowid,
4869			    vrf_id, port);
4870			*offset = length;
4871			if ((!abort_no_unlock) && (stcb != NULL)) {
4872				SCTP_TCB_UNLOCK(stcb);
4873			}
4874			return (NULL);
4875			break;
4876		case SCTP_PAD_CHUNK:
4877			break;
4878		case SCTP_INITIATION_ACK:
4879			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n");
4880			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4881				/* We are not interested anymore */
4882				if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) {
4883					;
4884				} else {
4885					*offset = length;
4886					if (stcb != NULL) {
4887#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4888						so = SCTP_INP_SO(inp);
4889						atomic_add_int(&stcb->asoc.refcnt, 1);
4890						SCTP_TCB_UNLOCK(stcb);
4891						SCTP_SOCKET_LOCK(so, 1);
4892						SCTP_TCB_LOCK(stcb);
4893						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4894#endif
4895						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4896						    SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4897#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4898						SCTP_SOCKET_UNLOCK(so, 1);
4899#endif
4900					}
4901					return (NULL);
4902				}
4903			}
4904			/* The INIT-ACK chunk must be the only chunk. */
4905			if ((num_chunks > 1) ||
4906			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4907				*offset = length;
4908				return (stcb);
4909			}
4910			if ((netp != NULL) && (*netp != NULL)) {
4911				ret = sctp_handle_init_ack(m, iphlen, *offset,
4912				    src, dst, sh,
4913				    (struct sctp_init_ack_chunk *)ch,
4914				    stcb, *netp,
4915				    &abort_no_unlock,
4916				    mflowtype, mflowid,
4917				    vrf_id);
4918			} else {
4919				ret = -1;
4920			}
4921			*offset = length;
4922			if (abort_no_unlock) {
4923				return (NULL);
4924			}
4925			/*
4926			 * Special case, I must call the output routine to
4927			 * get the cookie echoed
4928			 */
4929			if ((stcb != NULL) && (ret == 0)) {
4930				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4931			}
4932			return (stcb);
4933			break;
4934		case SCTP_SELECTIVE_ACK:
4935		case SCTP_NR_SELECTIVE_ACK:
4936			{
4937				int abort_now = 0;
4938				uint32_t a_rwnd, cum_ack;
4939				uint16_t num_seg, num_nr_seg, num_dup;
4940				uint8_t flags;
4941				int offset_seg, offset_dup;
4942
4943				SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
4944				    ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK");
4945				SCTP_STAT_INCR(sctps_recvsacks);
4946				if (stcb == NULL) {
4947					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n",
4948					    (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK");
4949					break;
4950				}
4951				if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
4952					if (chk_length < sizeof(struct sctp_sack_chunk)) {
4953						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4954						break;
4955					}
4956				} else {
4957					if (stcb->asoc.nrsack_supported == 0) {
4958						goto unknown_chunk;
4959					}
4960					if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4961						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n");
4962						break;
4963					}
4964				}
4965				if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4966					/*-
4967					 * If we have sent a shutdown-ack, we will pay no
4968					 * attention to a sack sent in to us since
4969					 * we don't care anymore.
4970					 */
4971					break;
4972				}
4973				flags = ch->chunk_flags;
4974				if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
4975					struct sctp_sack_chunk *sack;
4976
4977					sack = (struct sctp_sack_chunk *)ch;
4978					cum_ack = ntohl(sack->sack.cum_tsn_ack);
4979					num_seg = ntohs(sack->sack.num_gap_ack_blks);
4980					num_nr_seg = 0;
4981					num_dup = ntohs(sack->sack.num_dup_tsns);
4982					a_rwnd = ntohl(sack->sack.a_rwnd);
4983					if (sizeof(struct sctp_sack_chunk) +
4984					    num_seg * sizeof(struct sctp_gap_ack_block) +
4985					    num_dup * sizeof(uint32_t) != chk_length) {
4986						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4987						break;
4988					}
4989					offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4990					offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4991				} else {
4992					struct sctp_nr_sack_chunk *nr_sack;
4993
4994					nr_sack = (struct sctp_nr_sack_chunk *)ch;
4995					cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4996					num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4997					num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4998					num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4999					a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd);
5000					if (sizeof(struct sctp_nr_sack_chunk) +
5001					    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
5002					    num_dup * sizeof(uint32_t) != chk_length) {
5003						SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
5004						break;
5005					}
5006					offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
5007					offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block);
5008				}
5009				SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n",
5010				    (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK",
5011				    cum_ack, num_seg, a_rwnd);
5012				stcb->asoc.seen_a_sack_this_pkt = 1;
5013				if ((stcb->asoc.pr_sctp_cnt == 0) &&
5014				    (num_seg == 0) && (num_nr_seg == 0) &&
5015				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5016				    (stcb->asoc.saw_sack_with_frags == 0) &&
5017				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5018				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
5019					/*
5020					 * We have a SIMPLE sack having no
5021					 * prior segments and data on sent
5022					 * queue to be acked. Use the faster
5023					 * path sack processing. We also
5024					 * allow window update sacks with no
5025					 * missing segments to go this way
5026					 * too.
5027					 */
5028					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
5029					    &abort_now, ecne_seen);
5030				} else {
5031					if ((netp != NULL) && (*netp != NULL)) {
5032						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5033						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
5034						    cum_ack, a_rwnd, ecne_seen);
5035					}
5036				}
5037				if (abort_now) {
5038					/* ABORT signal from sack processing */
5039					*offset = length;
5040					return (NULL);
5041				}
5042				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5043				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5044				    (stcb->asoc.stream_queue_cnt == 0)) {
5045					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
5046				}
5047				break;
5048			}
5049		case SCTP_HEARTBEAT_REQUEST:
5050			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5051			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5052				SCTP_STAT_INCR(sctps_recvheartbeat);
5053				sctp_send_heartbeat_ack(stcb, m, *offset,
5054				    chk_length, *netp);
5055			}
5056			break;
5057		case SCTP_HEARTBEAT_ACK:
5058			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n");
5059			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5060				/* Its not ours */
5061				*offset = length;
5062				return (stcb);
5063			}
5064			SCTP_STAT_INCR(sctps_recvheartbeatack);
5065			if ((netp != NULL) && (*netp != NULL)) {
5066				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5067				    stcb, *netp);
5068			}
5069			break;
5070		case SCTP_ABORT_ASSOCIATION:
5071			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5072			    (void *)stcb);
5073			*offset = length;
5074			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5075				if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) {
5076					return (NULL);
5077				} else {
5078					return (stcb);
5079				}
5080			} else {
5081				return (NULL);
5082			}
5083			break;
5084		case SCTP_SHUTDOWN:
5085			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5086			    (void *)stcb);
5087			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5088				*offset = length;
5089				return (stcb);
5090			}
5091			if ((netp != NULL) && (*netp != NULL)) {
5092				int abort_flag = 0;
5093
5094				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5095				    stcb, *netp, &abort_flag);
5096				if (abort_flag) {
5097					*offset = length;
5098					return (NULL);
5099				}
5100			}
5101			break;
5102		case SCTP_SHUTDOWN_ACK:
5103			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb);
5104			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5105				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5106			}
5107			*offset = length;
5108			return (NULL);
5109			break;
5110		case SCTP_OPERATION_ERROR:
5111			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n");
5112			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) &&
5113			    sctp_handle_error(ch, stcb, *netp, contiguous) < 0) {
5114				*offset = length;
5115				return (NULL);
5116			}
5117			break;
5118		case SCTP_COOKIE_ECHO:
5119			SCTPDBG(SCTP_DEBUG_INPUT3,
5120			    "SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb);
5121			if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) {
5122				;
5123			} else {
5124				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5125					/* We are not interested anymore */
5126			abend:
5127					if (stcb != NULL) {
5128						SCTP_TCB_UNLOCK(stcb);
5129					}
5130					*offset = length;
5131					return (NULL);
5132				}
5133			}
5134			/*
5135			 * First are we accepting? We do this again here
5136			 * since it is possible that a previous endpoint WAS
5137			 * listening responded to a INIT-ACK and then
5138			 * closed. We opened and bound.. and are now no
5139			 * longer listening.
5140			 */
5141
5142			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
5143				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5144				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5145					op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5146					sctp_abort_association(inp, stcb, m, iphlen,
5147					    src, dst, sh, op_err,
5148					    mflowtype, mflowid,
5149					    vrf_id, port);
5150				}
5151				*offset = length;
5152				return (NULL);
5153			} else {
5154				struct mbuf *ret_buf;
5155				struct sctp_inpcb *linp;
5156				struct sctp_tmit_chunk *chk;
5157
5158				if (stcb) {
5159					linp = NULL;
5160				} else {
5161					linp = inp;
5162				}
5163
5164				if (linp != NULL) {
5165					SCTP_ASOC_CREATE_LOCK(linp);
5166					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5167					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5168						SCTP_ASOC_CREATE_UNLOCK(linp);
5169						goto abend;
5170					}
5171				}
5172
5173				if (netp != NULL) {
5174					struct sctp_tcb *locked_stcb;
5175
5176					locked_stcb = stcb;
5177					ret_buf =
5178					    sctp_handle_cookie_echo(m, iphlen,
5179					    *offset,
5180					    src, dst,
5181					    sh,
5182					    (struct sctp_cookie_echo_chunk *)ch,
5183					    &inp, &stcb, netp,
5184					    auth_skipped,
5185					    auth_offset,
5186					    auth_len,
5187					    &locked_stcb,
5188					    mflowtype,
5189					    mflowid,
5190					    vrf_id,
5191					    port);
5192					if ((locked_stcb != NULL) && (locked_stcb != stcb)) {
5193						SCTP_TCB_UNLOCK(locked_stcb);
5194					}
5195					if (stcb != NULL) {
5196						SCTP_TCB_LOCK_ASSERT(stcb);
5197					}
5198				} else {
5199					ret_buf = NULL;
5200				}
5201				if (linp != NULL) {
5202					SCTP_ASOC_CREATE_UNLOCK(linp);
5203				}
5204				if (ret_buf == NULL) {
5205					if (stcb != NULL) {
5206						SCTP_TCB_UNLOCK(stcb);
5207					}
5208					SCTPDBG(SCTP_DEBUG_INPUT3,
5209					    "GAK, null buffer\n");
5210					*offset = length;
5211					return (NULL);
5212				}
5213				/* if AUTH skipped, see if it verified... */
5214				if (auth_skipped) {
5215					got_auth = 1;
5216					auth_skipped = 0;
5217				}
5218				/* Restart the timer if we have pending data */
5219				TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
5220					if (chk->whoTo != NULL) {
5221						break;
5222					}
5223				}
5224				if (chk != NULL) {
5225					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5226				}
5227			}
5228			break;
5229		case SCTP_COOKIE_ACK:
5230			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb);
5231			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5232				return (stcb);
5233			}
5234			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5235				/* We are not interested anymore */
5236				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5237					;
5238				} else if (stcb) {
5239#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5240					so = SCTP_INP_SO(inp);
5241					atomic_add_int(&stcb->asoc.refcnt, 1);
5242					SCTP_TCB_UNLOCK(stcb);
5243					SCTP_SOCKET_LOCK(so, 1);
5244					SCTP_TCB_LOCK(stcb);
5245					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5246#endif
5247					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5248					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5249#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5250					SCTP_SOCKET_UNLOCK(so, 1);
5251#endif
5252					*offset = length;
5253					return (NULL);
5254				}
5255			}
5256			if ((netp != NULL) && (*netp != NULL)) {
5257				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5258			}
5259			break;
5260		case SCTP_ECN_ECHO:
5261			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n");
5262			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5263				/* Its not ours */
5264				*offset = length;
5265				return (stcb);
5266			}
5267			if (stcb->asoc.ecn_supported == 0) {
5268				goto unknown_chunk;
5269			}
5270			sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb);
5271			ecne_seen = 1;
5272			break;
5273		case SCTP_ECN_CWR:
5274			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n");
5275			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5276				*offset = length;
5277				return (stcb);
5278			}
5279			if (stcb->asoc.ecn_supported == 0) {
5280				goto unknown_chunk;
5281			}
5282			sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5283			break;
5284		case SCTP_SHUTDOWN_COMPLETE:
5285			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb);
5286			/* must be first and only chunk */
5287			if ((num_chunks > 1) ||
5288			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5289				*offset = length;
5290				return (stcb);
5291			}
5292			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5293				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5294				    stcb, *netp);
5295			}
5296			*offset = length;
5297			return (NULL);
5298			break;
5299		case SCTP_ASCONF:
5300			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5301			if (stcb != NULL) {
5302				if (stcb->asoc.asconf_supported == 0) {
5303					goto unknown_chunk;
5304				}
5305				sctp_handle_asconf(m, *offset, src,
5306				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5307				asconf_cnt++;
5308			}
5309			break;
5310		case SCTP_ASCONF_ACK:
5311			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n");
5312			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5313				/* Its not ours */
5314				*offset = length;
5315				return (stcb);
5316			}
5317			if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5318				if (stcb->asoc.asconf_supported == 0) {
5319					goto unknown_chunk;
5320				}
5321				/* He's alive so give him credit */
5322				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5323					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5324					    stcb->asoc.overall_error_count,
5325					    0,
5326					    SCTP_FROM_SCTP_INPUT,
5327					    __LINE__);
5328				}
5329				stcb->asoc.overall_error_count = 0;
5330				sctp_handle_asconf_ack(m, *offset,
5331				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5332				if (abort_no_unlock)
5333					return (NULL);
5334			}
5335			break;
5336		case SCTP_FORWARD_CUM_TSN:
5337		case SCTP_IFORWARD_CUM_TSN:
5338			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD_TSN\n");
5339			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5340				/* Its not ours */
5341				*offset = length;
5342				return (stcb);
5343			}
5344
5345			if (stcb != NULL) {
5346				int abort_flag = 0;
5347
5348				if (stcb->asoc.prsctp_supported == 0) {
5349					goto unknown_chunk;
5350				}
5351				*fwd_tsn_seen = 1;
5352				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5353					/* We are not interested anymore */
5354#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5355					so = SCTP_INP_SO(inp);
5356					atomic_add_int(&stcb->asoc.refcnt, 1);
5357					SCTP_TCB_UNLOCK(stcb);
5358					SCTP_SOCKET_LOCK(so, 1);
5359					SCTP_TCB_LOCK(stcb);
5360					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5361#endif
5362					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5363					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
5364#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5365					SCTP_SOCKET_UNLOCK(so, 1);
5366#endif
5367					*offset = length;
5368					return (NULL);
5369				}
5370				/*
5371				 * For sending a SACK this looks like DATA
5372				 * chunks.
5373				 */
5374				stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
5375				sctp_handle_forward_tsn(stcb,
5376				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5377				if (abort_flag) {
5378					*offset = length;
5379					return (NULL);
5380				}
5381			}
5382			break;
5383		case SCTP_STREAM_RESET:
5384			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5385			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5386				/* Its not ours */
5387				*offset = length;
5388				return (stcb);
5389			}
5390			if (stcb->asoc.reconfig_supported == 0) {
5391				goto unknown_chunk;
5392			}
5393			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5394				/* stop processing */
5395				*offset = length;
5396				return (NULL);
5397			}
5398			break;
5399		case SCTP_PACKET_DROPPED:
5400			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5401			/* re-get it all please */
5402			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5403				/* Its not ours */
5404				*offset = length;
5405				return (stcb);
5406			}
5407
5408			if ((ch != NULL) && (stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5409				if (stcb->asoc.pktdrop_supported == 0) {
5410					goto unknown_chunk;
5411				}
5412				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5413				    stcb, *netp,
5414				    min(chk_length, contiguous));
5415			}
5416			break;
5417		case SCTP_AUTHENTICATION:
5418			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5419			if (stcb == NULL) {
5420				/* save the first AUTH for later processing */
5421				if (auth_skipped == 0) {
5422					auth_offset = *offset;
5423					auth_len = chk_length;
5424					auth_skipped = 1;
5425				}
5426				/* skip this chunk (temporarily) */
5427				goto next_chunk;
5428			}
5429			if (stcb->asoc.auth_supported == 0) {
5430				goto unknown_chunk;
5431			}
5432			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5433			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5434			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5435				/* Its not ours */
5436				*offset = length;
5437				return (stcb);
5438			}
5439			if (got_auth == 1) {
5440				/* skip this chunk... it's already auth'd */
5441				goto next_chunk;
5442			}
5443			got_auth = 1;
5444			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5445			    m, *offset)) {
5446				/* auth HMAC failed so dump the packet */
5447				*offset = length;
5448				return (stcb);
5449			} else {
5450				/* remaining chunks are HMAC checked */
5451				stcb->asoc.authenticated = 1;
5452			}
5453			break;
5454
5455		default:
5456	unknown_chunk:
5457			/* it's an unknown chunk! */
5458			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5459				struct sctp_gen_error_cause *cause;
5460				int len;
5461
5462				op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
5463				    0, M_NOWAIT, 1, MT_DATA);
5464				if (op_err != NULL) {
5465					len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
5466					cause = mtod(op_err, struct sctp_gen_error_cause *);
5467					cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5468					cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
5469					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5470					SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
5471					if (SCTP_BUF_NEXT(op_err) != NULL) {
5472#ifdef SCTP_MBUF_LOGGING
5473						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5474							sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
5475						}
5476#endif
5477						sctp_queue_op_err(stcb, op_err);
5478					} else {
5479						sctp_m_freem(op_err);
5480					}
5481				}
5482			}
5483			if ((ch->chunk_type & 0x80) == 0) {
5484				/* discard this packet */
5485				*offset = length;
5486				return (stcb);
5487			}	/* else skip this bad chunk and continue... */
5488			break;
5489		}		/* switch (ch->chunk_type) */
5490
5491
5492next_chunk:
5493		/* get the next chunk */
5494		*offset += SCTP_SIZE32(chk_length);
5495		if (*offset >= length) {
5496			/* no more data left in the mbuf chain */
5497			break;
5498		}
5499		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5500		    sizeof(struct sctp_chunkhdr), chunk_buf);
5501		if (ch == NULL) {
5502			*offset = length;
5503			return (stcb);
5504		}
5505	}			/* while */
5506
5507	if ((asconf_cnt > 0) && (stcb != NULL)) {
5508		sctp_send_asconf_ack(stcb);
5509	}
5510	return (stcb);
5511}
5512
5513
5514/*
5515 * common input chunk processing (v4 and v6)
5516 */
5517void
5518sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
5519    struct sockaddr *src, struct sockaddr *dst,
5520    struct sctphdr *sh, struct sctp_chunkhdr *ch,
5521    uint8_t compute_crc,
5522    uint8_t ecn_bits,
5523    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
5524    uint32_t vrf_id, uint16_t port)
5525{
5526	uint32_t high_tsn;
5527	int fwd_tsn_seen = 0, data_processed = 0;
5528	struct mbuf *m = *mm, *op_err;
5529	char msg[SCTP_DIAG_INFO_LEN];
5530	int un_sent;
5531	int cnt_ctrl_ready = 0;
5532	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
5533	struct sctp_tcb *stcb = NULL;
5534	struct sctp_nets *net = NULL;
5535
5536	SCTP_STAT_INCR(sctps_recvdatagrams);
5537#ifdef SCTP_AUDITING_ENABLED
5538	sctp_audit_log(0xE0, 1);
5539	sctp_auditing(0, inp, stcb, net);
5540#endif
5541	if (compute_crc != 0) {
5542		uint32_t check, calc_check;
5543
5544		check = sh->checksum;
5545		sh->checksum = 0;
5546		calc_check = sctp_calculate_cksum(m, iphlen);
5547		sh->checksum = check;
5548		if (calc_check != check) {
5549			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5550			    calc_check, check, (void *)m, length, iphlen);
5551			stcb = sctp_findassociation_addr(m, offset, src, dst,
5552			    sh, ch, &inp, &net, vrf_id);
5553#if defined(INET) || defined(INET6)
5554			if ((ch->chunk_type != SCTP_INITIATION) &&
5555			    (net != NULL) && (net->port != port)) {
5556				if (net->port == 0) {
5557					/* UDP encapsulation turned on. */
5558					net->mtu -= sizeof(struct udphdr);
5559					if (stcb->asoc.smallest_mtu > net->mtu) {
5560						sctp_pathmtu_adjustment(stcb, net->mtu);
5561					}
5562				} else if (port == 0) {
5563					/* UDP encapsulation turned off. */
5564					net->mtu += sizeof(struct udphdr);
5565					/* XXX Update smallest_mtu */
5566				}
5567				net->port = port;
5568			}
5569#endif
5570			if (net != NULL) {
5571				net->flowtype = mflowtype;
5572				net->flowid = mflowid;
5573			}
5574			if ((inp != NULL) && (stcb != NULL)) {
5575				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
5576				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5577			} else if ((inp != NULL) && (stcb == NULL)) {
5578				inp_decr = inp;
5579			}
5580			SCTP_STAT_INCR(sctps_badsum);
5581			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5582			goto out;
5583		}
5584	}
5585	/* Destination port of 0 is illegal, based on RFC4960. */
5586	if (sh->dest_port == 0) {
5587		SCTP_STAT_INCR(sctps_hdrops);
5588		goto out;
5589	}
5590	stcb = sctp_findassociation_addr(m, offset, src, dst,
5591	    sh, ch, &inp, &net, vrf_id);
5592#if defined(INET) || defined(INET6)
5593	if ((ch->chunk_type != SCTP_INITIATION) &&
5594	    (net != NULL) && (net->port != port)) {
5595		if (net->port == 0) {
5596			/* UDP encapsulation turned on. */
5597			net->mtu -= sizeof(struct udphdr);
5598			if (stcb->asoc.smallest_mtu > net->mtu) {
5599				sctp_pathmtu_adjustment(stcb, net->mtu);
5600			}
5601		} else if (port == 0) {
5602			/* UDP encapsulation turned off. */
5603			net->mtu += sizeof(struct udphdr);
5604			/* XXX Update smallest_mtu */
5605		}
5606		net->port = port;
5607	}
5608#endif
5609	if (net != NULL) {
5610		net->flowtype = mflowtype;
5611		net->flowid = mflowid;
5612	}
5613	if (inp == NULL) {
5614		SCTP_STAT_INCR(sctps_noport);
5615		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
5616			goto out;
5617		}
5618		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5619			sctp_send_shutdown_complete2(src, dst, sh,
5620			    mflowtype, mflowid, fibnum,
5621			    vrf_id, port);
5622			goto out;
5623		}
5624		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5625			goto out;
5626		}
5627		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
5628			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
5629			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
5630			    (ch->chunk_type != SCTP_INIT))) {
5631				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5632				    "Out of the blue");
5633				sctp_send_abort(m, iphlen, src, dst,
5634				    sh, 0, op_err,
5635				    mflowtype, mflowid, fibnum,
5636				    vrf_id, port);
5637			}
5638		}
5639		goto out;
5640	} else if (stcb == NULL) {
5641		inp_decr = inp;
5642	}
5643	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5644	    (void *)m, iphlen, offset, length, (void *)stcb);
5645	if (stcb) {
5646		/* always clear this before beginning a packet */
5647		stcb->asoc.authenticated = 0;
5648		stcb->asoc.seen_a_sack_this_pkt = 0;
5649		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5650		    (void *)stcb, stcb->asoc.state);
5651
5652		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5653		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5654			/*-
5655			 * If we hit here, we had a ref count
5656			 * up when the assoc was aborted and the
5657			 * timer is clearing out the assoc, we should
5658			 * NOT respond to any packet.. its OOTB.
5659			 */
5660			SCTP_TCB_UNLOCK(stcb);
5661			stcb = NULL;
5662			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5663			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5664			    msg);
5665			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5666			    mflowtype, mflowid, inp->fibnum,
5667			    vrf_id, port);
5668			goto out;
5669		}
5670
5671	}
5672	if (IS_SCTP_CONTROL(ch)) {
5673		/* process the control portion of the SCTP packet */
5674		/* sa_ignore NO_NULL_CHK */
5675		stcb = sctp_process_control(m, iphlen, &offset, length,
5676		    src, dst, sh, ch,
5677		    inp, stcb, &net, &fwd_tsn_seen,
5678		    mflowtype, mflowid, fibnum,
5679		    vrf_id, port);
5680		if (stcb) {
5681			/*
5682			 * This covers us if the cookie-echo was there and
5683			 * it changes our INP.
5684			 */
5685			inp = stcb->sctp_ep;
5686#if defined(INET) || defined(INET6)
5687			if ((ch->chunk_type != SCTP_INITIATION) &&
5688			    (net != NULL) && (net->port != port)) {
5689				if (net->port == 0) {
5690					/* UDP encapsulation turned on. */
5691					net->mtu -= sizeof(struct udphdr);
5692					if (stcb->asoc.smallest_mtu > net->mtu) {
5693						sctp_pathmtu_adjustment(stcb, net->mtu);
5694					}
5695				} else if (port == 0) {
5696					/* UDP encapsulation turned off. */
5697					net->mtu += sizeof(struct udphdr);
5698					/* XXX Update smallest_mtu */
5699				}
5700				net->port = port;
5701			}
5702#endif
5703		}
5704	} else {
5705		/*
5706		 * no control chunks, so pre-process DATA chunks (these
5707		 * checks are taken care of by control processing)
5708		 */
5709
5710		/*
5711		 * if DATA only packet, and auth is required, then punt...
5712		 * can't have authenticated without any AUTH (control)
5713		 * chunks
5714		 */
5715		if ((stcb != NULL) &&
5716		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5717			/* "silently" ignore */
5718			SCTP_STAT_INCR(sctps_recvauthmissing);
5719			goto out;
5720		}
5721		if (stcb == NULL) {
5722			/* out of the blue DATA chunk */
5723			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5724			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5725			    msg);
5726			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5727			    mflowtype, mflowid, fibnum,
5728			    vrf_id, port);
5729			goto out;
5730		}
5731		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5732			/* v_tag mismatch! */
5733			SCTP_STAT_INCR(sctps_badvtag);
5734			goto out;
5735		}
5736	}
5737
5738	if (stcb == NULL) {
5739		/*
5740		 * no valid TCB for this packet, or we found it's a bad
5741		 * packet while processing control, or we're done with this
5742		 * packet (done or skip rest of data), so we drop it...
5743		 */
5744		goto out;
5745	}
5746
5747	/*
5748	 * DATA chunk processing
5749	 */
5750	/* plow through the data chunks while length > offset */
5751
5752	/*
5753	 * Rest should be DATA only.  Check authentication state if AUTH for
5754	 * DATA is required.
5755	 */
5756	if ((length > offset) &&
5757	    (stcb != NULL) &&
5758	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5759	    !stcb->asoc.authenticated) {
5760		/* "silently" ignore */
5761		SCTP_STAT_INCR(sctps_recvauthmissing);
5762		SCTPDBG(SCTP_DEBUG_AUTH1,
5763		    "Data chunk requires AUTH, skipped\n");
5764		goto trigger_send;
5765	}
5766	if (length > offset) {
5767		int retval;
5768
5769		/*
5770		 * First check to make sure our state is correct. We would
5771		 * not get here unless we really did have a tag, so we don't
5772		 * abort if this happens, just dump the chunk silently.
5773		 */
5774		switch (SCTP_GET_STATE(stcb)) {
5775		case SCTP_STATE_COOKIE_ECHOED:
5776			/*
5777			 * we consider data with valid tags in this state
5778			 * shows us the cookie-ack was lost. Imply it was
5779			 * there.
5780			 */
5781			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5782			break;
5783		case SCTP_STATE_COOKIE_WAIT:
5784			/*
5785			 * We consider OOTB any data sent during asoc setup.
5786			 */
5787			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
5788			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5789			    msg);
5790			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
5791			    mflowtype, mflowid, inp->fibnum,
5792			    vrf_id, port);
5793			goto out;
5794			/* sa_ignore NOTREACHED */
5795			break;
5796		case SCTP_STATE_EMPTY:	/* should not happen */
5797		case SCTP_STATE_INUSE:	/* should not happen */
5798		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5799		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5800		default:
5801			goto out;
5802			/* sa_ignore NOTREACHED */
5803			break;
5804		case SCTP_STATE_OPEN:
5805		case SCTP_STATE_SHUTDOWN_SENT:
5806			break;
5807		}
5808		/* plow through the data chunks while length > offset */
5809		retval = sctp_process_data(mm, iphlen, &offset, length,
5810		    inp, stcb, net, &high_tsn);
5811		if (retval == 2) {
5812			/*
5813			 * The association aborted, NO UNLOCK needed since
5814			 * the association is destroyed.
5815			 */
5816			stcb = NULL;
5817			goto out;
5818		}
5819		data_processed = 1;
5820		/*
5821		 * Anything important needs to have been m_copy'ed in
5822		 * process_data
5823		 */
5824	}
5825
5826	/* take care of ecn */
5827	if ((data_processed == 1) &&
5828	    (stcb->asoc.ecn_supported == 1) &&
5829	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
5830		/* Yep, we need to add a ECNE */
5831		sctp_send_ecn_echo(stcb, net, high_tsn);
5832	}
5833
5834	if ((data_processed == 0) && (fwd_tsn_seen)) {
5835		int was_a_gap;
5836		uint32_t highest_tsn;
5837
5838		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
5839			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
5840		} else {
5841			highest_tsn = stcb->asoc.highest_tsn_inside_map;
5842		}
5843		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
5844		stcb->asoc.send_sack = 1;
5845		sctp_sack_check(stcb, was_a_gap);
5846	} else if (fwd_tsn_seen) {
5847		stcb->asoc.send_sack = 1;
5848	}
5849	/* trigger send of any chunks in queue... */
5850trigger_send:
5851#ifdef SCTP_AUDITING_ENABLED
5852	sctp_audit_log(0xE0, 2);
5853	sctp_auditing(1, inp, stcb, net);
5854#endif
5855	SCTPDBG(SCTP_DEBUG_INPUT1,
5856	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5857	    stcb->asoc.peers_rwnd,
5858	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5859	    stcb->asoc.total_flight);
5860	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5861	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
5862		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
5863	}
5864	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
5865	    cnt_ctrl_ready ||
5866	    stcb->asoc.trigger_reset ||
5867	    ((un_sent) &&
5868	    (stcb->asoc.peers_rwnd > 0 ||
5869	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5870		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5871		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5872		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5873	}
5874#ifdef SCTP_AUDITING_ENABLED
5875	sctp_audit_log(0xE0, 3);
5876	sctp_auditing(2, inp, stcb, net);
5877#endif
5878out:
5879	if (stcb != NULL) {
5880		SCTP_TCB_UNLOCK(stcb);
5881	}
5882	if (inp_decr != NULL) {
5883		/* reduce ref-count */
5884		SCTP_INP_WLOCK(inp_decr);
5885		SCTP_INP_DECR_REF(inp_decr);
5886		SCTP_INP_WUNLOCK(inp_decr);
5887	}
5888	return;
5889}
5890
5891#ifdef INET
/*
 * IPv4 input entry: validate the packet headers and hand the datagram
 * to sctp_common_input_processing().  "off" is the IP header length;
 * "port" is the peer's UDP port when the packet arrived UDP
 * encapsulated (0 for plain SCTP over IP).  The packet is always
 * consumed: freed at "out", released on VRF-lookup failure, or freed
 * by m_pullup() when the pullup fails.
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;
	uint8_t compute_crc;
	uint32_t mflowid;
	uint8_t mflowtype;
	uint16_t fibnum;

	iphlen = off;
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(m, SCTP_MBUF_INPUT);
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	/* Capture the flow identification the lower layers attached. */
	mflowid = m->m_pkthdr.flowid;
	mflowtype = M_HASHTYPE_GET(m);
	fibnum = M_GETFIB(m);
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		/* m_pullup() frees the chain itself on failure. */
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	/* offset now marks the start of the first chunk. */
	offset -= sizeof(struct sctp_chunkhdr);
	/* Build source/destination sockaddrs from the IP and SCTP headers. */
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
	length = ntohs(ip->ip_len);
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
	/* Skip the software checksum when the NIC already verified it. */
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
	sctp_common_input_processing(&m, iphlen, offset, length,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst,
	    sh, ch,
	    compute_crc,
	    ecn_bits,
	    mflowtype, mflowid, fibnum,
	    vrf_id, port);
out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
5995
5996#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
5997extern int *sctp_cpuarry;
5998#endif
5999
6000int
6001sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
6002{
6003	struct mbuf *m;
6004	int off;
6005
6006	m = *mp;
6007	off = *offp;
6008#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
6009	if (mp_ncpus > 1) {
6010		struct ip *ip;
6011		struct sctphdr *sh;
6012		int offset;
6013		int cpu_to_use;
6014		uint32_t flowid, tag;
6015
6016		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
6017			flowid = m->m_pkthdr.flowid;
6018		} else {
6019			/*
6020			 * No flow id built by lower layers fix it so we
6021			 * create one.
6022			 */
6023			offset = off + sizeof(struct sctphdr);
6024			if (SCTP_BUF_LEN(m) < offset) {
6025				if ((m = m_pullup(m, offset)) == NULL) {
6026					SCTP_STAT_INCR(sctps_hdrops);
6027					return (IPPROTO_DONE);
6028				}
6029			}
6030			ip = mtod(m, struct ip *);
6031			sh = (struct sctphdr *)((caddr_t)ip + off);
6032			tag = htonl(sh->v_tag);
6033			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
6034			m->m_pkthdr.flowid = flowid;
6035			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
6036		}
6037		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
6038		sctp_queue_to_mcore(m, off, cpu_to_use);
6039		return (IPPROTO_DONE);
6040	}
6041#endif
6042	sctp_input_with_port(m, off, 0);
6043	return (IPPROTO_DONE);
6044}
6045#endif
6046