/* sctp_input.c revision 252585 */
122208Sdavidn/*-
222208Sdavidn * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
322208Sdavidn * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
422208Sdavidn * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
522208Sdavidn *
622208Sdavidn * Redistribution and use in source and binary forms, with or without
722208Sdavidn * modification, are permitted provided that the following conditions are met:
822208Sdavidn *
922208Sdavidn * a) Redistributions of source code must retain the above copyright notice,
1022208Sdavidn *    this list of conditions and the following disclaimer.
1122208Sdavidn *
1222208Sdavidn * b) Redistributions in binary form must reproduce the above copyright
1322208Sdavidn *    notice, this list of conditions and the following disclaimer in
1422208Sdavidn *    the documentation and/or other materials provided with the distribution.
1522208Sdavidn *
1622208Sdavidn * c) Neither the name of Cisco Systems, Inc. nor the names of its
1722208Sdavidn *    contributors may be used to endorse or promote products derived
1822208Sdavidn *    from this software without specific prior written permission.
1922208Sdavidn *
2022208Sdavidn * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2122208Sdavidn * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2222208Sdavidn * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2322208Sdavidn * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
2422208Sdavidn * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2522208Sdavidn * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26216582Scharnier * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27216582Scharnier * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2831331Scharnier * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2991214Sbde * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
3022208Sdavidn * THE POSSIBILITY OF SUCH DAMAGE.
3122208Sdavidn */
3291214Sbde
3331331Scharnier#include <sys/cdefs.h>
3422208Sdavidn__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 252585 2013-07-03 18:48:43Z tuexen $");
3522208Sdavidn
3622208Sdavidn#include <netinet/sctp_os.h>
3722208Sdavidn#include <netinet/sctp_var.h>
3822208Sdavidn#include <netinet/sctp_sysctl.h>
3922208Sdavidn#include <netinet/sctp_pcb.h>
40144716Sstefanf#include <netinet/sctp_header.h>
4122208Sdavidn#include <netinet/sctputil.h>
4222208Sdavidn#include <netinet/sctp_output.h>
4322208Sdavidn#include <netinet/sctp_input.h>
4422208Sdavidn#include <netinet/sctp_auth.h>
4522208Sdavidn#include <netinet/sctp_indata.h>
4622208Sdavidn#include <netinet/sctp_asconf.h>
4722208Sdavidn#include <netinet/sctp_bsd_addr.h>
4822208Sdavidn#include <netinet/sctp_timer.h>
4922208Sdavidn#include <netinet/sctp_crc32.h>
5022208Sdavidn#include <netinet/udp.h>
5122208Sdavidn#include <sys/smp.h>
5222208Sdavidn
5322208Sdavidn
5422208Sdavidn
5522208Sdavidnstatic void
5622208Sdavidnsctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
5722208Sdavidn{
5822208Sdavidn	struct sctp_nets *net;
5922208Sdavidn
6090301Simp	/*
6190301Simp	 * This now not only stops all cookie timers it also stops any INIT
6290301Simp	 * timers as well. This will make sure that the timers are stopped
6390301Simp	 * in all collision cases.
6490301Simp	 */
6590301Simp	SCTP_TCB_LOCK_ASSERT(stcb);
6690301Simp	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
6790301Simp		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
6890301Simp			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
6922208Sdavidn			    stcb->sctp_ep,
7022208Sdavidn			    stcb,
7122208Sdavidn			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
7222208Sdavidn		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
7322208Sdavidn			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
7422208Sdavidn			    stcb->sctp_ep,
7522208Sdavidn			    stcb,
7622208Sdavidn			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
7722208Sdavidn		}
7822208Sdavidn	}
79216582Scharnier}
8022208Sdavidn
8122208Sdavidn/* INIT handler */
8222208Sdavidnstatic void
8322208Sdavidnsctp_handle_init(struct mbuf *m, int iphlen, int offset,
8422208Sdavidn    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
8522208Sdavidn    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
8622208Sdavidn    struct sctp_tcb *stcb, int *abort_no_unlock,
8722208Sdavidn    uint8_t use_mflowid, uint32_t mflowid,
8822208Sdavidn    uint32_t vrf_id, uint16_t port)
8922208Sdavidn{
9022208Sdavidn	struct sctp_init *init;
9122208Sdavidn	struct mbuf *op_err;
9222208Sdavidn
9322208Sdavidn	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
9422208Sdavidn	    (void *)stcb);
9590301Simp	if (stcb == NULL) {
9622208Sdavidn		SCTP_INP_RLOCK(inp);
9722208Sdavidn	}
9822208Sdavidn	/* validate length */
9922208Sdavidn	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
10022208Sdavidn		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
10122208Sdavidn		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
10222208Sdavidn		    use_mflowid, mflowid,
10322208Sdavidn		    vrf_id, port);
10422208Sdavidn		if (stcb)
10522208Sdavidn			*abort_no_unlock = 1;
10622208Sdavidn		goto outnow;
10790301Simp	}
10822208Sdavidn	/* validate parameters */
10922208Sdavidn	init = &cp->init;
11022208Sdavidn	if (init->initiate_tag == 0) {
11122208Sdavidn		/* protocol error... send abort */
11222208Sdavidn		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
11322208Sdavidn		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
11422208Sdavidn		    use_mflowid, mflowid,
11522208Sdavidn		    vrf_id, port);
11622208Sdavidn		if (stcb)
11722208Sdavidn			*abort_no_unlock = 1;
11822208Sdavidn		goto outnow;
11922208Sdavidn	}
12022208Sdavidn	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
12122208Sdavidn		/* invalid parameter... send abort */
12222208Sdavidn		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
12322208Sdavidn		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
12422208Sdavidn		    use_mflowid, mflowid,
12522208Sdavidn		    vrf_id, port);
12622208Sdavidn		if (stcb)
12722208Sdavidn			*abort_no_unlock = 1;
12822208Sdavidn		goto outnow;
12922208Sdavidn	}
13022208Sdavidn	if (init->num_inbound_streams == 0) {
13122208Sdavidn		/* protocol error... send abort */
13222208Sdavidn		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
13322208Sdavidn		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
13490301Simp		    use_mflowid, mflowid,
13522208Sdavidn		    vrf_id, port);
13622208Sdavidn		if (stcb)
13722208Sdavidn			*abort_no_unlock = 1;
13822208Sdavidn		goto outnow;
13922208Sdavidn	}
14022208Sdavidn	if (init->num_outbound_streams == 0) {
14122208Sdavidn		/* protocol error... send abort */
14222208Sdavidn		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
14322208Sdavidn		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
14422208Sdavidn		    use_mflowid, mflowid,
14522208Sdavidn		    vrf_id, port);
14622208Sdavidn		if (stcb)
14722208Sdavidn			*abort_no_unlock = 1;
14822208Sdavidn		goto outnow;
14922208Sdavidn	}
15022208Sdavidn	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
15122208Sdavidn	    offset + ntohs(cp->ch.chunk_length))) {
15222208Sdavidn		/* auth parameter(s) error... send abort */
15322208Sdavidn		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, NULL,
15422208Sdavidn		    use_mflowid, mflowid,
15522208Sdavidn		    vrf_id, port);
15622208Sdavidn		if (stcb)
15722208Sdavidn			*abort_no_unlock = 1;
15822208Sdavidn		goto outnow;
15922208Sdavidn	}
16022208Sdavidn	/*
16122208Sdavidn	 * We are only accepting if we have a socket with positive
16222208Sdavidn	 * so_qlimit.
16322208Sdavidn	 */
16422208Sdavidn	if ((stcb == NULL) &&
16522208Sdavidn	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
16622208Sdavidn	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
16722208Sdavidn	    (inp->sctp_socket == NULL) ||
16822208Sdavidn	    (inp->sctp_socket->so_qlimit == 0))) {
16922208Sdavidn		/*
17022208Sdavidn		 * FIX ME ?? What about TCP model and we have a
17122208Sdavidn		 * match/restart case? Actually no fix is needed. the lookup
17222208Sdavidn		 * will always find the existing assoc so stcb would not be
17322208Sdavidn		 * NULL. It may be questionable to do this since we COULD
17422208Sdavidn		 * just send back the INIT-ACK and hope that the app did
17522208Sdavidn		 * accept()'s by the time the COOKIE was sent. But there is
17622208Sdavidn		 * a price to pay for COOKIE generation and I don't want to
17722208Sdavidn		 * pay it on the chance that the app will actually do some
17822208Sdavidn		 * accepts(). The App just looses and should NOT be in this
17922208Sdavidn		 * state :-)
18022208Sdavidn		 */
18122208Sdavidn		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
18222208Sdavidn			sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
18322208Sdavidn			    use_mflowid, mflowid,
18422208Sdavidn			    vrf_id, port);
18522208Sdavidn		}
18622208Sdavidn		goto outnow;
18722208Sdavidn	}
18822208Sdavidn	if ((stcb != NULL) &&
18922208Sdavidn	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
19022208Sdavidn		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
19122208Sdavidn		sctp_send_shutdown_ack(stcb, NULL);
19222208Sdavidn		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
19322208Sdavidn	} else {
19422208Sdavidn		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
19522208Sdavidn		sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, src, dst,
19622208Sdavidn		    sh, cp,
19722208Sdavidn		    use_mflowid, mflowid,
19822208Sdavidn		    vrf_id, port,
19922208Sdavidn		    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
20022208Sdavidn	}
20122208Sdavidnoutnow:
20222208Sdavidn	if (stcb == NULL) {
20322208Sdavidn		SCTP_INP_RUNLOCK(inp);
20422208Sdavidn	}
20522208Sdavidn}
20622208Sdavidn
20722208Sdavidn/*
20822208Sdavidn * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
20929003Sdavidn */
21022208Sdavidn
21122208Sdavidnint
21222208Sdavidnsctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
21322208Sdavidn#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
21422208Sdavidn    SCTP_UNUSED
21522208Sdavidn#endif
21622208Sdavidn)
21722208Sdavidn{
21822208Sdavidn	int unsent_data = 0;
21922208Sdavidn	unsigned int i;
22022208Sdavidn	struct sctp_stream_queue_pending *sp;
22122208Sdavidn	struct sctp_association *asoc;
22222208Sdavidn
22322208Sdavidn	/*
22422208Sdavidn	 * This function returns the number of streams that have true unsent
22522208Sdavidn	 * data on them. Note that as it looks through it will clean up any
22622208Sdavidn	 * places that have old data that has been sent but left at top of
22722208Sdavidn	 * stream queue.
22822208Sdavidn	 */
22922208Sdavidn	asoc = &stcb->asoc;
23022208Sdavidn	SCTP_TCB_SEND_LOCK(stcb);
23122208Sdavidn	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
23222208Sdavidn		/* Check to see if some data queued */
23322208Sdavidn		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
23422208Sdavidn			/* sa_ignore FREED_MEMORY */
23522208Sdavidn			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
23622208Sdavidn			if (sp == NULL) {
23722208Sdavidn				continue;
23822208Sdavidn			}
23922208Sdavidn			if ((sp->msg_is_complete) &&
24090301Simp			    (sp->length == 0) &&
24122208Sdavidn			    (sp->sender_all_done)) {
24222208Sdavidn				/*
24322208Sdavidn				 * We are doing differed cleanup. Last time
24422208Sdavidn				 * through when we took all the data the
24522208Sdavidn				 * sender_all_done was not set.
24622208Sdavidn				 */
24722208Sdavidn				if (sp->put_last_out == 0) {
24822208Sdavidn					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
24922208Sdavidn					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
25022208Sdavidn					    sp->sender_all_done,
25122208Sdavidn					    sp->length,
25222208Sdavidn					    sp->msg_is_complete,
25322208Sdavidn					    sp->put_last_out);
25422208Sdavidn				}
25522208Sdavidn				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
25622208Sdavidn				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
25722208Sdavidn				if (sp->net) {
25822208Sdavidn					sctp_free_remote_addr(sp->net);
25922208Sdavidn					sp->net = NULL;
26022208Sdavidn				}
26122208Sdavidn				if (sp->data) {
26222208Sdavidn					sctp_m_freem(sp->data);
26322208Sdavidn					sp->data = NULL;
26422208Sdavidn				}
26522208Sdavidn				sctp_free_a_strmoq(stcb, sp, so_locked);
26622208Sdavidn			} else {
26722208Sdavidn				unsent_data++;
26822208Sdavidn				break;
26922208Sdavidn			}
27022208Sdavidn		}
27122208Sdavidn	}
27222208Sdavidn	SCTP_TCB_SEND_UNLOCK(stcb);
27322208Sdavidn	return (unsent_data);
27490301Simp}
27522208Sdavidn
/*
 * Absorb the parameters of a peer's INIT (or INIT-ACK, which begins with
 * the same layout) into the association: peer vtag, peer rwnd, initial
 * TSNs, and the negotiated stream counts.  Trims our outbound streams if
 * the peer offers fewer inbound streams than we pre-opened, and
 * (re)allocates the inbound stream array.
 *
 * Returns 0 on success, -1 if memory for the inbound stream array could
 * not be allocated.  Caller holds the TCB lock (send lock is taken here).
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's: one before the peer's initial TSN, i.e. "nothing seen yet" */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		/*
		 * Peer accepts fewer inbound streams than we pre-opened:
		 * drop everything queued on the now-invalid stream ids and
		 * notify the ULP about each failed datagram.
		 */
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge chunks already moved to the send queue. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* tell the ULP this datagram will never be sent */
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					/* re-check: the notify may have consumed the data */
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		/* Then purge the per-stream pending queues of the dropped streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;

	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones (restart case): flush every inbound queue first. */
		struct sctp_queued_to_read *ctl, *nctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count is bounded by our compile-time maximum. */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		/* 0xffff == "nothing delivered yet" (SSN wraps from 0xffff to 0) */
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
42622208Sdavidn
42722208Sdavidn/*
42822208Sdavidn * INIT-ACK message processing/consumption returns value < 0 on error
42922208Sdavidn */
43022208Sdavidnstatic int
43122208Sdavidnsctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
43222208Sdavidn    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
43322208Sdavidn    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
43422208Sdavidn    struct sctp_nets *net, int *abort_no_unlock,
43522208Sdavidn    uint8_t use_mflowid, uint32_t mflowid,
43622208Sdavidn    uint32_t vrf_id)
43722208Sdavidn{
43822208Sdavidn	struct sctp_association *asoc;
43922208Sdavidn	struct mbuf *op_err;
44090301Simp	int retval, abort_flag;
44122208Sdavidn	uint32_t initack_limit;
44222208Sdavidn	int nat_friendly = 0;
44322208Sdavidn
44422208Sdavidn	/* First verify that we have no illegal param's */
44522208Sdavidn	abort_flag = 0;
44622208Sdavidn	op_err = NULL;
44722208Sdavidn
44822208Sdavidn	op_err = sctp_arethere_unrecognized_parameters(m,
44922208Sdavidn	    (offset + sizeof(struct sctp_init_chunk)),
45022208Sdavidn	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
45122208Sdavidn	if (abort_flag) {
45222208Sdavidn		/* Send an abort and notify peer */
45322208Sdavidn		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
45422491Sdavidn		*abort_no_unlock = 1;
45522491Sdavidn		return (-1);
45622491Sdavidn	}
45722208Sdavidn	asoc = &stcb->asoc;
45822491Sdavidn	asoc->peer_supports_nat = (uint8_t) nat_friendly;
45922491Sdavidn	/* process the peer's parameters in the INIT-ACK */
46022491Sdavidn	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
46122491Sdavidn	if (retval < 0) {
46222208Sdavidn		return (retval);
46322491Sdavidn	}
46422491Sdavidn	initack_limit = offset + ntohs(cp->ch.chunk_length);
46522208Sdavidn	/* load all addresses */
46622491Sdavidn	if ((retval = sctp_load_addresses_from_init(stcb, m,
46722491Sdavidn	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
46822491Sdavidn	    src, dst, NULL))) {
46922491Sdavidn		/* Huh, we should abort */
47022491Sdavidn		SCTPDBG(SCTP_DEBUG_INPUT1,
47122491Sdavidn		    "Load addresses from INIT causes an abort %d\n",
47222208Sdavidn		    retval);
47322491Sdavidn		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
47422491Sdavidn		    src, dst, sh, NULL,
47522491Sdavidn		    use_mflowid, mflowid,
47622208Sdavidn		    vrf_id, net->port);
47722491Sdavidn		*abort_no_unlock = 1;
47822491Sdavidn		return (-1);
47922491Sdavidn	}
48022491Sdavidn	/* if the peer doesn't support asconf, flush the asconf queue */
48122208Sdavidn	if (asoc->peer_supports_asconf == 0) {
48222208Sdavidn		struct sctp_asconf_addr *param, *nparam;
48322208Sdavidn
48422208Sdavidn		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
48522208Sdavidn			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
48622208Sdavidn			SCTP_FREE(param, SCTP_M_ASC_ADDR);
48722208Sdavidn		}
48822208Sdavidn	}
489	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
490	    stcb->asoc.local_hmacs);
491	if (op_err) {
492		sctp_queue_op_err(stcb, op_err);
493		/* queuing will steal away the mbuf chain to the out queue */
494		op_err = NULL;
495	}
496	/* extract the cookie and queue it to "echo" it back... */
497	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
498		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
499		    stcb->asoc.overall_error_count,
500		    0,
501		    SCTP_FROM_SCTP_INPUT,
502		    __LINE__);
503	}
504	stcb->asoc.overall_error_count = 0;
505	net->error_count = 0;
506
507	/*
508	 * Cancel the INIT timer, We do this first before queueing the
509	 * cookie. We always cancel at the primary to assue that we are
510	 * canceling the timer started by the INIT which always goes to the
511	 * primary.
512	 */
513	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
514	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
515
516	/* calculate the RTO */
517	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
518	    SCTP_RTT_FROM_NON_DATA);
519
520	retval = sctp_send_cookie_echo(m, offset, stcb, net);
521	if (retval < 0) {
522		/*
523		 * No cookie, we probably should send a op error. But in any
524		 * case if there is no cookie in the INIT-ACK, we can
525		 * abandon the peer, its broke.
526		 */
527		if (retval == -3) {
528			/* We abort with an error of missing mandatory param */
529			op_err =
530			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
531			if (op_err) {
532				/*
533				 * Expand beyond to include the mandatory
534				 * param cookie
535				 */
536				struct sctp_inv_mandatory_param *mp;
537
538				SCTP_BUF_LEN(op_err) =
539				    sizeof(struct sctp_inv_mandatory_param);
540				mp = mtod(op_err,
541				    struct sctp_inv_mandatory_param *);
542				/* Subtract the reserved param */
543				mp->length =
544				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
545				mp->num_param = htonl(1);
546				mp->param = htons(SCTP_STATE_COOKIE);
547				mp->resv = 0;
548			}
549			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
550			    src, dst, sh, op_err,
551			    use_mflowid, mflowid,
552			    vrf_id, net->port);
553			*abort_no_unlock = 1;
554		}
555		return (retval);
556	}
557	return (0);
558}
559
560static void
561sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
562    struct sctp_tcb *stcb, struct sctp_nets *net)
563{
564	struct sockaddr_storage store;
565	struct sctp_nets *r_net, *f_net;
566	struct timeval tv;
567	int req_prim = 0;
568	uint16_t old_error_counter;
569
570#ifdef INET
571	struct sockaddr_in *sin;
572
573#endif
574#ifdef INET6
575	struct sockaddr_in6 *sin6;
576
577#endif
578
579	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
580		/* Invalid length */
581		return;
582	}
583	memset(&store, 0, sizeof(store));
584	switch (cp->heartbeat.hb_info.addr_family) {
585#ifdef INET
586	case AF_INET:
587		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
588			sin = (struct sockaddr_in *)&store;
589			sin->sin_family = cp->heartbeat.hb_info.addr_family;
590			sin->sin_len = cp->heartbeat.hb_info.addr_len;
591			sin->sin_port = stcb->rport;
592			memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
593			    sizeof(sin->sin_addr));
594		} else {
595			return;
596		}
597		break;
598#endif
599#ifdef INET6
600	case AF_INET6:
601		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
602			sin6 = (struct sockaddr_in6 *)&store;
603			sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
604			sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
605			sin6->sin6_port = stcb->rport;
606			memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
607			    sizeof(sin6->sin6_addr));
608		} else {
609			return;
610		}
611		break;
612#endif
613	default:
614		return;
615	}
616	r_net = sctp_findnet(stcb, (struct sockaddr *)&store);
617	if (r_net == NULL) {
618		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
619		return;
620	}
621	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
622	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
623	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
624		/*
625		 * If the its a HB and it's random value is correct when can
626		 * confirm the destination.
627		 */
628		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
629		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
630			stcb->asoc.primary_destination = r_net;
631			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
632			f_net = TAILQ_FIRST(&stcb->asoc.nets);
633			if (f_net != r_net) {
634				/*
635				 * first one on the list is NOT the primary
636				 * sctp_cmpaddr() is much more efficent if
637				 * the primary is the first on the list,
638				 * make it so.
639				 */
640				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
641				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
642			}
643			req_prim = 1;
644		}
645		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
646		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
647		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
648		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
649	}
650	old_error_counter = r_net->error_count;
651	r_net->error_count = 0;
652	r_net->hb_responded = 1;
653	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
654	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
655	/* Now lets do a RTO with this */
656	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
657	    SCTP_RTT_FROM_NON_DATA);
658	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
659		r_net->dest_state |= SCTP_ADDR_REACHABLE;
660		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
661		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
662	}
663	if (r_net->dest_state & SCTP_ADDR_PF) {
664		r_net->dest_state &= ~SCTP_ADDR_PF;
665		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
666	}
667	if (old_error_counter > 0) {
668		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
669		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
670	}
671	if (r_net == stcb->asoc.primary_destination) {
672		if (stcb->asoc.alternate) {
673			/* release the alternate, primary is good */
674			sctp_free_remote_addr(stcb->asoc.alternate);
675			stcb->asoc.alternate = NULL;
676		}
677	}
678	/* Mobility adaptation */
679	if (req_prim) {
680		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
681		    SCTP_MOBILITY_BASE) ||
682		    sctp_is_mobility_feature_on(stcb->sctp_ep,
683		    SCTP_MOBILITY_FASTHANDOFF)) &&
684		    sctp_is_mobility_feature_on(stcb->sctp_ep,
685		    SCTP_MOBILITY_PRIM_DELETED)) {
686
687			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
688			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
689			    SCTP_MOBILITY_FASTHANDOFF)) {
690				sctp_assoc_immediate_retrans(stcb,
691				    stcb->asoc.primary_destination);
692			}
693			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
694			    SCTP_MOBILITY_BASE)) {
695				sctp_move_chunks_from_net(stcb,
696				    stcb->asoc.deleted_primary);
697			}
698			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
699			    stcb->asoc.deleted_primary);
700		}
701	}
702}
703
704static int
705sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
706{
707	/*
708	 * return 0 means we want you to proceed with the abort non-zero
709	 * means no abort processing
710	 */
711	struct sctpasochead *head;
712
713	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
714		/* generate a new vtag and send init */
715		LIST_REMOVE(stcb, sctp_asocs);
716		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
717		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
718		/*
719		 * put it in the bucket in the vtag hash of assoc's for the
720		 * system
721		 */
722		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
723		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
724		return (1);
725	}
726	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
727		/*
728		 * treat like a case where the cookie expired i.e.: - dump
729		 * current cookie. - generate a new vtag. - resend init.
730		 */
731		/* generate a new vtag and send init */
732		LIST_REMOVE(stcb, sctp_asocs);
733		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
734		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
735		sctp_stop_all_cookie_timers(stcb);
736		sctp_toss_old_cookies(stcb, &stcb->asoc);
737		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
738		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
739		/*
740		 * put it in the bucket in the vtag hash of assoc's for the
741		 * system
742		 */
743		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
744		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
745		return (1);
746	}
747	return (0);
748}
749
750static int
751sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
752    struct sctp_nets *net)
753{
754	/*
755	 * return 0 means we want you to proceed with the abort non-zero
756	 * means no abort processing
757	 */
758	if (stcb->asoc.peer_supports_auth == 0) {
759		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
760		return (0);
761	}
762	sctp_asconf_send_nat_state_update(stcb, net);
763	return (1);
764}
765
766
/*
 * Process an incoming ABORT chunk: recognize the two special NAT error
 * causes (which may restart the handshake instead of killing the
 * association), otherwise notify the ULP and free the TCB.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(abort->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_missing_nat_state *natc;

		/*
		 * NOTE(review): the check above only guarantees one byte
		 * beyond the chunk header, yet natc->cause reads two --
		 * presumably safe because chunks are 32-bit padded in the
		 * mbuf, but verify against the pullup done by the caller.
		 */
		natc = (struct sctp_missing_nat_state *)(abort + 1);
		error = ntohs(natc->cause);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    abort->ch.chunk_flags);
			/* Non-zero: handshake restarted, keep the assoc. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return;
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    abort->ch.chunk_flags);
			/* Non-zero: ASCONF update sent, keep the assoc. */
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: the socket lock must be taken before the TCB
	 * lock, so drop the TCB lock (holding a refcount so the assoc
	 * cannot vanish), take the socket lock, then re-lock the TCB.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
837
838static void
839sctp_start_net_timers(struct sctp_tcb *stcb)
840{
841	uint32_t cnt_hb_sent;
842	struct sctp_nets *net;
843
844	cnt_hb_sent = 0;
845	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
846		/*
847		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
848		 * If the dest in unconfirmed send a hb as well if under
849		 * max_hb_burst have been sent.
850		 */
851		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
852		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
853		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
854		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
855			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
856			cnt_hb_sent++;
857		}
858	}
859	if (cnt_hb_sent) {
860		sctp_chunk_output(stcb->sctp_ep, stcb,
861		    SCTP_OUTPUT_FROM_COOKIE_ACK,
862		    SCTP_SO_NOT_LOCKED);
863	}
864}
865
866
/*
 * Process an incoming SHUTDOWN chunk: absorb the cumulative-TSN ack it
 * carries, terminate any partial-delivery in progress, move to
 * SHUTDOWN-RECEIVED, and if nothing remains queued answer immediately
 * with a SHUTDOWN-ACK.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* SHUTDOWN is meaningless before the association is established. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		/* Process the cumulative TSN ack carried in the chunk. */
		sctp_update_acked(stcb, cp, abort_flag);
		if (*abort_flag) {
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be taken before the TCB lock: drop,
		 * lock socket, re-lock, under a temporary refcount.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* Wake any reader blocked on the aborted PD-API record. */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_send_shutdown_ack(stcb, net);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}
972
973static void
974sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
975    struct sctp_tcb *stcb,
976    struct sctp_nets *net)
977{
978	struct sctp_association *asoc;
979
980#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
981	struct socket *so;
982
983	so = SCTP_INP_SO(stcb->sctp_ep);
984#endif
985	SCTPDBG(SCTP_DEBUG_INPUT2,
986	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
987	if (stcb == NULL)
988		return;
989
990	asoc = &stcb->asoc;
991	/* process according to association state */
992	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
993	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
994		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
995		sctp_send_shutdown_complete(stcb, net, 1);
996		SCTP_TCB_UNLOCK(stcb);
997		return;
998	}
999	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
1000	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1001		/* unexpected SHUTDOWN-ACK... so ignore... */
1002		SCTP_TCB_UNLOCK(stcb);
1003		return;
1004	}
1005	if (asoc->control_pdapi) {
1006		/*
1007		 * With a normal shutdown we assume the end of last record.
1008		 */
1009		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1010		asoc->control_pdapi->end_added = 1;
1011		asoc->control_pdapi->pdapi_aborted = 1;
1012		asoc->control_pdapi = NULL;
1013		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1014#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1015		atomic_add_int(&stcb->asoc.refcnt, 1);
1016		SCTP_TCB_UNLOCK(stcb);
1017		SCTP_SOCKET_LOCK(so, 1);
1018		SCTP_TCB_LOCK(stcb);
1019		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1020		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1021			/* assoc was freed while we were unlocked */
1022			SCTP_SOCKET_UNLOCK(so, 1);
1023			return;
1024		}
1025#endif
1026		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1027#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1028		SCTP_SOCKET_UNLOCK(so, 1);
1029#endif
1030	}
1031#ifdef INVARIANTS
1032	if (!TAILQ_EMPTY(&asoc->send_queue) ||
1033	    !TAILQ_EMPTY(&asoc->sent_queue) ||
1034	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
1035		panic("Queues are not empty when handling SHUTDOWN-ACK");
1036	}
1037#endif
1038	/* stop the timer */
1039	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
1040	/* send SHUTDOWN-COMPLETE */
1041	sctp_send_shutdown_complete(stcb, net, 0);
1042	/* notify upper layer protocol */
1043	if (stcb->sctp_socket) {
1044		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1045		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1046			stcb->sctp_socket->so_snd.sb_cc = 0;
1047		}
1048		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1049	}
1050	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
1051	/* free the TCB but first save off the ep */
1052#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1053	atomic_add_int(&stcb->asoc.refcnt, 1);
1054	SCTP_TCB_UNLOCK(stcb);
1055	SCTP_SOCKET_LOCK(so, 1);
1056	SCTP_TCB_LOCK(stcb);
1057	atomic_subtract_int(&stcb->asoc.refcnt, 1);
1058#endif
1059	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1060	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
1061#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1062	SCTP_SOCKET_UNLOCK(so, 1);
1063#endif
1064}
1065
1066/*
1067 * Skip past the param header and then we will find the chunk that caused the
1068 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
1069 * our peer must be broken.
1070 */
1071static void
1072sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1073    struct sctp_nets *net)
1074{
1075	struct sctp_chunkhdr *chk;
1076
1077	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1078	switch (chk->chunk_type) {
1079	case SCTP_ASCONF_ACK:
1080	case SCTP_ASCONF:
1081		sctp_asconf_cleanup(stcb, net);
1082		break;
1083	case SCTP_FORWARD_CUM_TSN:
1084		stcb->asoc.peer_supports_prsctp = 0;
1085		break;
1086	default:
1087		SCTPDBG(SCTP_DEBUG_INPUT2,
1088		    "Peer does not support chunk type %d(%x)??\n",
1089		    chk->chunk_type, (uint32_t) chk->chunk_type);
1090		break;
1091	}
1092}
1093
1094/*
1095 * Skip past the param header and then we will find the param that caused the
1096 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1097 * these will turn of specific features.
1098 */
1099static void
1100sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1101{
1102	struct sctp_paramhdr *pbad;
1103
1104	pbad = phdr + 1;
1105	switch (ntohs(pbad->param_type)) {
1106		/* pr-sctp draft */
1107	case SCTP_PRSCTP_SUPPORTED:
1108		stcb->asoc.peer_supports_prsctp = 0;
1109		break;
1110	case SCTP_SUPPORTED_CHUNK_EXT:
1111		break;
1112		/* draft-ietf-tsvwg-addip-sctp */
1113	case SCTP_HAS_NAT_SUPPORT:
1114		stcb->asoc.peer_supports_nat = 0;
1115		break;
1116	case SCTP_ADD_IP_ADDRESS:
1117	case SCTP_DEL_IP_ADDRESS:
1118	case SCTP_SET_PRIM_ADDR:
1119		stcb->asoc.peer_supports_asconf = 0;
1120		break;
1121	case SCTP_SUCCESS_REPORT:
1122	case SCTP_ERROR_CAUSE_IND:
1123		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1124		SCTPDBG(SCTP_DEBUG_INPUT2,
1125		    "Turning off ASCONF to this strange peer\n");
1126		stcb->asoc.peer_supports_asconf = 0;
1127		break;
1128	default:
1129		SCTPDBG(SCTP_DEBUG_INPUT2,
1130		    "Peer does not support param type %d(%x)??\n",
1131		    pbad->param_type, (uint32_t) pbad->param_type);
1132		break;
1133	}
1134}
1135
1136static int
1137sctp_handle_error(struct sctp_chunkhdr *ch,
1138    struct sctp_tcb *stcb, struct sctp_nets *net)
1139{
1140	int chklen;
1141	struct sctp_paramhdr *phdr;
1142	uint16_t error, error_type;
1143	uint16_t error_len;
1144	struct sctp_association *asoc;
1145	int adjust;
1146
1147#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1148	struct socket *so;
1149
1150#endif
1151
1152	/* parse through all of the errors and process */
1153	asoc = &stcb->asoc;
1154	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1155	    sizeof(struct sctp_chunkhdr));
1156	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
1157	error = 0;
1158	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1159		/* Process an Error Cause */
1160		error_type = ntohs(phdr->param_type);
1161		error_len = ntohs(phdr->param_length);
1162		if ((error_len > chklen) || (error_len == 0)) {
1163			/* invalid param length for this param */
1164			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1165			    chklen, error_len);
1166			return (0);
1167		}
1168		if (error == 0) {
1169			/* report the first error cause */
1170			error = error_type;
1171		}
1172		switch (error_type) {
1173		case SCTP_CAUSE_INVALID_STREAM:
1174		case SCTP_CAUSE_MISSING_PARAM:
1175		case SCTP_CAUSE_INVALID_PARAM:
1176		case SCTP_CAUSE_NO_USER_DATA:
1177			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1178			    error_type);
1179			break;
1180		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1181			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1182			    ch->chunk_flags);
1183			if (sctp_handle_nat_colliding_state(stcb)) {
1184				return (0);
1185			}
1186			break;
1187		case SCTP_CAUSE_NAT_MISSING_STATE:
1188			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1189			    ch->chunk_flags);
1190			if (sctp_handle_nat_missing_state(stcb, net)) {
1191				return (0);
1192			}
1193			break;
1194		case SCTP_CAUSE_STALE_COOKIE:
1195			/*
1196			 * We only act if we have echoed a cookie and are
1197			 * waiting.
1198			 */
1199			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1200				int *p;
1201
1202				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1203				/* Save the time doubled */
1204				asoc->cookie_preserve_req = ntohl(*p) << 1;
1205				asoc->stale_cookie_count++;
1206				if (asoc->stale_cookie_count >
1207				    asoc->max_init_times) {
1208					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
1209					/* now free the asoc */
1210#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1211					so = SCTP_INP_SO(stcb->sctp_ep);
1212					atomic_add_int(&stcb->asoc.refcnt, 1);
1213					SCTP_TCB_UNLOCK(stcb);
1214					SCTP_SOCKET_LOCK(so, 1);
1215					SCTP_TCB_LOCK(stcb);
1216					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1217#endif
1218					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1219					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1220#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1221					SCTP_SOCKET_UNLOCK(so, 1);
1222#endif
1223					return (-1);
1224				}
1225				/* blast back to INIT state */
1226				sctp_toss_old_cookies(stcb, &stcb->asoc);
1227				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1228				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1229				sctp_stop_all_cookie_timers(stcb);
1230				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1231			}
1232			break;
1233		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1234			/*
1235			 * Nothing we can do here, we don't do hostname
1236			 * addresses so if the peer does not like my IPv6
1237			 * (or IPv4 for that matter) it does not matter. If
1238			 * they don't support that type of address, they can
1239			 * NOT possibly get that packet type... i.e. with no
1240			 * IPv6 you can't recieve a IPv6 packet. so we can
1241			 * safely ignore this one. If we ever added support
1242			 * for HOSTNAME Addresses, then we would need to do
1243			 * something here.
1244			 */
1245			break;
1246		case SCTP_CAUSE_UNRECOG_CHUNK:
1247			sctp_process_unrecog_chunk(stcb, phdr, net);
1248			break;
1249		case SCTP_CAUSE_UNRECOG_PARAM:
1250			sctp_process_unrecog_param(stcb, phdr);
1251			break;
1252		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1253			/*
1254			 * We ignore this since the timer will drive out a
1255			 * new cookie anyway and there timer will drive us
1256			 * to send a SHUTDOWN_COMPLETE. We can't send one
1257			 * here since we don't have their tag.
1258			 */
1259			break;
1260		case SCTP_CAUSE_DELETING_LAST_ADDR:
1261		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1262		case SCTP_CAUSE_DELETING_SRC_ADDR:
1263			/*
1264			 * We should NOT get these here, but in a
1265			 * ASCONF-ACK.
1266			 */
1267			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1268			    error_type);
1269			break;
1270		case SCTP_CAUSE_OUT_OF_RESC:
1271			/*
1272			 * And what, pray tell do we do with the fact that
1273			 * the peer is out of resources? Not really sure we
1274			 * could do anything but abort. I suspect this
1275			 * should have came WITH an abort instead of in a
1276			 * OP-ERROR.
1277			 */
1278			break;
1279		default:
1280			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1281			    error_type);
1282			break;
1283		}
1284		adjust = SCTP_SIZE32(error_len);
1285		chklen -= adjust;
1286		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1287	}
1288	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED);
1289	return (0);
1290}
1291
1292static int
1293sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1294    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
1295    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1296    struct sctp_nets *net, int *abort_no_unlock,
1297    uint8_t use_mflowid, uint32_t mflowid,
1298    uint32_t vrf_id)
1299{
1300	struct sctp_init_ack *init_ack;
1301	struct mbuf *op_err;
1302
1303	SCTPDBG(SCTP_DEBUG_INPUT2,
1304	    "sctp_handle_init_ack: handling INIT-ACK\n");
1305
1306	if (stcb == NULL) {
1307		SCTPDBG(SCTP_DEBUG_INPUT2,
1308		    "sctp_handle_init_ack: TCB is null\n");
1309		return (-1);
1310	}
1311	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1312		/* Invalid length */
1313		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1314		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1315		    src, dst, sh, op_err,
1316		    use_mflowid, mflowid,
1317		    vrf_id, net->port);
1318		*abort_no_unlock = 1;
1319		return (-1);
1320	}
1321	init_ack = &cp->init;
1322	/* validate parameters */
1323	if (init_ack->initiate_tag == 0) {
1324		/* protocol error... send an abort */
1325		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1326		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1327		    src, dst, sh, op_err,
1328		    use_mflowid, mflowid,
1329		    vrf_id, net->port);
1330		*abort_no_unlock = 1;
1331		return (-1);
1332	}
1333	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1334		/* protocol error... send an abort */
1335		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1336		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1337		    src, dst, sh, op_err,
1338		    use_mflowid, mflowid,
1339		    vrf_id, net->port);
1340		*abort_no_unlock = 1;
1341		return (-1);
1342	}
1343	if (init_ack->num_inbound_streams == 0) {
1344		/* protocol error... send an abort */
1345		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1346		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1347		    src, dst, sh, op_err,
1348		    use_mflowid, mflowid,
1349		    vrf_id, net->port);
1350		*abort_no_unlock = 1;
1351		return (-1);
1352	}
1353	if (init_ack->num_outbound_streams == 0) {
1354		/* protocol error... send an abort */
1355		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1356		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1357		    src, dst, sh, op_err,
1358		    use_mflowid, mflowid,
1359		    vrf_id, net->port);
1360		*abort_no_unlock = 1;
1361		return (-1);
1362	}
1363	/* process according to association state... */
1364	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1365	case SCTP_STATE_COOKIE_WAIT:
1366		/* this is the expected state for this chunk */
1367		/* process the INIT-ACK parameters */
1368		if (stcb->asoc.primary_destination->dest_state &
1369		    SCTP_ADDR_UNCONFIRMED) {
1370			/*
1371			 * The primary is where we sent the INIT, we can
1372			 * always consider it confirmed when the INIT-ACK is
1373			 * returned. Do this before we load addresses
1374			 * though.
1375			 */
1376			stcb->asoc.primary_destination->dest_state &=
1377			    ~SCTP_ADDR_UNCONFIRMED;
1378			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1379			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1380		}
1381		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
1382		    net, abort_no_unlock,
1383		    use_mflowid, mflowid,
1384		    vrf_id) < 0) {
1385			/* error in parsing parameters */
1386			return (-1);
1387		}
1388		/* update our state */
1389		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1390		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1391
1392		/* reset the RTO calc */
1393		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1394			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1395			    stcb->asoc.overall_error_count,
1396			    0,
1397			    SCTP_FROM_SCTP_INPUT,
1398			    __LINE__);
1399		}
1400		stcb->asoc.overall_error_count = 0;
1401		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1402		/*
1403		 * collapse the init timer back in case of a exponential
1404		 * backoff
1405		 */
1406		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1407		    stcb, net);
1408		/*
1409		 * the send at the end of the inbound data processing will
1410		 * cause the cookie to be sent
1411		 */
1412		break;
1413	case SCTP_STATE_SHUTDOWN_SENT:
1414		/* incorrect state... discard */
1415		break;
1416	case SCTP_STATE_COOKIE_ECHOED:
1417		/* incorrect state... discard */
1418		break;
1419	case SCTP_STATE_OPEN:
1420		/* incorrect state... discard */
1421		break;
1422	case SCTP_STATE_EMPTY:
1423	case SCTP_STATE_INUSE:
1424	default:
1425		/* incorrect state... discard */
1426		return (-1);
1427		break;
1428	}
1429	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1430	return (0);
1431}
1432
1433static struct sctp_tcb *
1434sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1435    struct sockaddr *src, struct sockaddr *dst,
1436    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1437    struct sctp_inpcb *inp, struct sctp_nets **netp,
1438    struct sockaddr *init_src, int *notification,
1439    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1440    uint8_t use_mflowid, uint32_t mflowid,
1441    uint32_t vrf_id, uint16_t port);
1442
1443
1444/*
1445 * handle a state cookie for an existing association m: input packet mbuf
1446 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1447 * "split" mbuf and the cookie signature does not exist offset: offset into
1448 * mbuf to the cookie-echo chunk
1449 */
1450static struct sctp_tcb *
1451sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1452    struct sockaddr *src, struct sockaddr *dst,
1453    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1454    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1455    struct sockaddr *init_src, int *notification,
1456    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1457    uint8_t use_mflowid, uint32_t mflowid,
1458    uint32_t vrf_id, uint16_t port)
1459{
1460	struct sctp_association *asoc;
1461	struct sctp_init_chunk *init_cp, init_buf;
1462	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1463	struct sctp_nets *net;
1464	struct mbuf *op_err;
1465	struct sctp_paramhdr *ph;
1466	int init_offset, initack_offset, i;
1467	int retval;
1468	int spec_flag = 0;
1469	uint32_t how_indx;
1470
1471	net = *netp;
1472	/* I know that the TCB is non-NULL from the caller */
1473	asoc = &stcb->asoc;
1474	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1475		if (asoc->cookie_how[how_indx] == 0)
1476			break;
1477	}
1478	if (how_indx < sizeof(asoc->cookie_how)) {
1479		asoc->cookie_how[how_indx] = 1;
1480	}
1481	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1482		/* SHUTDOWN came in after sending INIT-ACK */
1483		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1484		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1485		    0, M_NOWAIT, 1, MT_DATA);
1486		if (op_err == NULL) {
1487			/* FOOBAR */
1488			return (NULL);
1489		}
1490		/* Set the len */
1491		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1492		ph = mtod(op_err, struct sctp_paramhdr *);
1493		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1494		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1495		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1496		    use_mflowid, mflowid,
1497		    vrf_id, net->port);
1498		if (how_indx < sizeof(asoc->cookie_how))
1499			asoc->cookie_how[how_indx] = 2;
1500		return (NULL);
1501	}
1502	/*
1503	 * find and validate the INIT chunk in the cookie (peer's info) the
1504	 * INIT should start after the cookie-echo header struct (chunk
1505	 * header, state cookie header struct)
1506	 */
1507	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1508
1509	init_cp = (struct sctp_init_chunk *)
1510	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1511	    (uint8_t *) & init_buf);
1512	if (init_cp == NULL) {
1513		/* could not pull a INIT chunk in cookie */
1514		return (NULL);
1515	}
1516	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1517		return (NULL);
1518	}
1519	/*
1520	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1521	 * INIT-ACK follows the INIT chunk
1522	 */
1523	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1524	initack_cp = (struct sctp_init_ack_chunk *)
1525	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1526	    (uint8_t *) & initack_buf);
1527	if (initack_cp == NULL) {
1528		/* could not pull INIT-ACK chunk in cookie */
1529		return (NULL);
1530	}
1531	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1532		return (NULL);
1533	}
1534	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1535	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1536		/*
1537		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1538		 * to get into the OPEN state
1539		 */
1540		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1541			/*-
1542			 * Opps, this means that we somehow generated two vtag's
1543			 * the same. I.e. we did:
1544			 *  Us               Peer
1545			 *   <---INIT(tag=a)------
1546			 *   ----INIT-ACK(tag=t)-->
1547			 *   ----INIT(tag=t)------> *1
1548			 *   <---INIT-ACK(tag=a)---
1549                         *   <----CE(tag=t)------------- *2
1550			 *
1551			 * At point *1 we should be generating a different
1552			 * tag t'. Which means we would throw away the CE and send
1553			 * ours instead. Basically this is case C (throw away side).
1554			 */
1555			if (how_indx < sizeof(asoc->cookie_how))
1556				asoc->cookie_how[how_indx] = 17;
1557			return (NULL);
1558
1559		}
1560		switch SCTP_GET_STATE
1561			(asoc) {
1562		case SCTP_STATE_COOKIE_WAIT:
1563		case SCTP_STATE_COOKIE_ECHOED:
1564			/*
1565			 * INIT was sent but got a COOKIE_ECHO with the
1566			 * correct tags... just accept it...but we must
1567			 * process the init so that we can make sure we have
1568			 * the right seq no's.
1569			 */
1570			/* First we must process the INIT !! */
1571			retval = sctp_process_init(init_cp, stcb);
1572			if (retval < 0) {
1573				if (how_indx < sizeof(asoc->cookie_how))
1574					asoc->cookie_how[how_indx] = 3;
1575				return (NULL);
1576			}
1577			/* we have already processed the INIT so no problem */
1578			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1579			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1580			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1581			/* update current state */
1582			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1583				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1584			else
1585				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1586
1587			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1588			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1589				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1590				    stcb->sctp_ep, stcb, asoc->primary_destination);
1591			}
1592			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1593			sctp_stop_all_cookie_timers(stcb);
1594			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1595			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1596			    (inp->sctp_socket->so_qlimit == 0)
1597			    ) {
1598#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1599				struct socket *so;
1600
1601#endif
1602				/*
1603				 * Here is where collision would go if we
1604				 * did a connect() and instead got a
1605				 * init/init-ack/cookie done before the
1606				 * init-ack came back..
1607				 */
1608				stcb->sctp_ep->sctp_flags |=
1609				    SCTP_PCB_FLAGS_CONNECTED;
1610#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1611				so = SCTP_INP_SO(stcb->sctp_ep);
1612				atomic_add_int(&stcb->asoc.refcnt, 1);
1613				SCTP_TCB_UNLOCK(stcb);
1614				SCTP_SOCKET_LOCK(so, 1);
1615				SCTP_TCB_LOCK(stcb);
1616				atomic_add_int(&stcb->asoc.refcnt, -1);
1617				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1618					SCTP_SOCKET_UNLOCK(so, 1);
1619					return (NULL);
1620				}
1621#endif
1622				soisconnected(stcb->sctp_socket);
1623#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1624				SCTP_SOCKET_UNLOCK(so, 1);
1625#endif
1626			}
1627			/* notify upper layer */
1628			*notification = SCTP_NOTIFY_ASSOC_UP;
1629			/*
1630			 * since we did not send a HB make sure we don't
1631			 * double things
1632			 */
1633			net->hb_responded = 1;
1634			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1635			    &cookie->time_entered,
1636			    sctp_align_unsafe_makecopy,
1637			    SCTP_RTT_FROM_NON_DATA);
1638
1639			if (stcb->asoc.sctp_autoclose_ticks &&
1640			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1641				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1642				    inp, stcb, NULL);
1643			}
1644			break;
1645		default:
1646			/*
1647			 * we're in the OPEN state (or beyond), so peer must
1648			 * have simply lost the COOKIE-ACK
1649			 */
1650			break;
1651			}	/* end switch */
1652		sctp_stop_all_cookie_timers(stcb);
1653		/*
1654		 * We ignore the return code here.. not sure if we should
1655		 * somehow abort.. but we do have an existing asoc. This
1656		 * really should not fail.
1657		 */
1658		if (sctp_load_addresses_from_init(stcb, m,
1659		    init_offset + sizeof(struct sctp_init_chunk),
1660		    initack_offset, src, dst, init_src)) {
1661			if (how_indx < sizeof(asoc->cookie_how))
1662				asoc->cookie_how[how_indx] = 4;
1663			return (NULL);
1664		}
1665		/* respond with a COOKIE-ACK */
1666		sctp_toss_old_cookies(stcb, asoc);
1667		sctp_send_cookie_ack(stcb);
1668		if (how_indx < sizeof(asoc->cookie_how))
1669			asoc->cookie_how[how_indx] = 5;
1670		return (stcb);
1671	}
1672	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1673	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1674	    cookie->tie_tag_my_vtag == 0 &&
1675	    cookie->tie_tag_peer_vtag == 0) {
1676		/*
1677		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1678		 */
1679		if (how_indx < sizeof(asoc->cookie_how))
1680			asoc->cookie_how[how_indx] = 6;
1681		return (NULL);
1682	}
1683	/*
1684	 * If nat support, and the below and stcb is established, send back
1685	 * a ABORT(colliding state) if we are established.
1686	 */
1687	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1688	    (asoc->peer_supports_nat) &&
1689	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1690	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1691	    (asoc->peer_vtag == 0)))) {
1692		/*
1693		 * Special case - Peer's support nat. We may have two init's
1694		 * that we gave out the same tag on since one was not
1695		 * established.. i.e. we get INIT from host-1 behind the nat
1696		 * and we respond tag-a, we get a INIT from host-2 behind
1697		 * the nat and we get tag-a again. Then we bring up host-1
1698		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1699		 * Now we have colliding state. We must send an abort here
1700		 * with colliding state indication.
1701		 */
1702		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1703		    0, M_NOWAIT, 1, MT_DATA);
1704		if (op_err == NULL) {
1705			/* FOOBAR */
1706			return (NULL);
1707		}
1708		/* pre-reserve some space */
1709#ifdef INET6
1710		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1711#else
1712		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1713#endif
1714		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1715		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1716		/* Set the len */
1717		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1718		ph = mtod(op_err, struct sctp_paramhdr *);
1719		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
1720		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1721		sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
1722		    use_mflowid, mflowid,
1723		    vrf_id, port);
1724		return (NULL);
1725	}
1726	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1727	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1728	    (asoc->peer_vtag == 0))) {
1729		/*
1730		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1731		 * should be ok, re-accept peer info
1732		 */
1733		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1734			/*
1735			 * Extension of case C. If we hit this, then the
1736			 * random number generator returned the same vtag
1737			 * when we first sent our INIT-ACK and when we later
1738			 * sent our INIT. The side with the seq numbers that
1739			 * are different will be the one that normnally
1740			 * would have hit case C. This in effect "extends"
1741			 * our vtags in this collision case to be 64 bits.
1742			 * The same collision could occur aka you get both
1743			 * vtag and seq number the same twice in a row.. but
1744			 * is much less likely. If it did happen then we
1745			 * would proceed through and bring up the assoc.. we
1746			 * may end up with the wrong stream setup however..
1747			 * which would be bad.. but there is no way to
1748			 * tell.. until we send on a stream that does not
1749			 * exist :-)
1750			 */
1751			if (how_indx < sizeof(asoc->cookie_how))
1752				asoc->cookie_how[how_indx] = 7;
1753
1754			return (NULL);
1755		}
1756		if (how_indx < sizeof(asoc->cookie_how))
1757			asoc->cookie_how[how_indx] = 8;
1758		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1759		sctp_stop_all_cookie_timers(stcb);
1760		/*
1761		 * since we did not send a HB make sure we don't double
1762		 * things
1763		 */
1764		net->hb_responded = 1;
1765		if (stcb->asoc.sctp_autoclose_ticks &&
1766		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1767			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1768			    NULL);
1769		}
1770		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1771		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1772
1773		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1774			/*
1775			 * Ok the peer probably discarded our data (if we
1776			 * echoed a cookie+data). So anything on the
1777			 * sent_queue should be marked for retransmit, we
1778			 * may not get something to kick us so it COULD
1779			 * still take a timeout to move these.. but it can't
1780			 * hurt to mark them.
1781			 */
1782			struct sctp_tmit_chunk *chk;
1783
1784			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1785				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1786					chk->sent = SCTP_DATAGRAM_RESEND;
1787					sctp_flight_size_decrease(chk);
1788					sctp_total_flight_decrease(stcb, chk);
1789					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1790					spec_flag++;
1791				}
1792			}
1793
1794		}
1795		/* process the INIT info (peer's info) */
1796		retval = sctp_process_init(init_cp, stcb);
1797		if (retval < 0) {
1798			if (how_indx < sizeof(asoc->cookie_how))
1799				asoc->cookie_how[how_indx] = 9;
1800			return (NULL);
1801		}
1802		if (sctp_load_addresses_from_init(stcb, m,
1803		    init_offset + sizeof(struct sctp_init_chunk),
1804		    initack_offset, src, dst, init_src)) {
1805			if (how_indx < sizeof(asoc->cookie_how))
1806				asoc->cookie_how[how_indx] = 10;
1807			return (NULL);
1808		}
1809		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1810		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1811			*notification = SCTP_NOTIFY_ASSOC_UP;
1812
1813			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1814			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1815			    (inp->sctp_socket->so_qlimit == 0)) {
1816#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1817				struct socket *so;
1818
1819#endif
1820				stcb->sctp_ep->sctp_flags |=
1821				    SCTP_PCB_FLAGS_CONNECTED;
1822#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1823				so = SCTP_INP_SO(stcb->sctp_ep);
1824				atomic_add_int(&stcb->asoc.refcnt, 1);
1825				SCTP_TCB_UNLOCK(stcb);
1826				SCTP_SOCKET_LOCK(so, 1);
1827				SCTP_TCB_LOCK(stcb);
1828				atomic_add_int(&stcb->asoc.refcnt, -1);
1829				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1830					SCTP_SOCKET_UNLOCK(so, 1);
1831					return (NULL);
1832				}
1833#endif
1834				soisconnected(stcb->sctp_socket);
1835#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1836				SCTP_SOCKET_UNLOCK(so, 1);
1837#endif
1838			}
1839			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1840				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1841			else
1842				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1843			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1844		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1845			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1846		} else {
1847			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1848		}
1849		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1850		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1851			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1852			    stcb->sctp_ep, stcb, asoc->primary_destination);
1853		}
1854		sctp_stop_all_cookie_timers(stcb);
1855		sctp_toss_old_cookies(stcb, asoc);
1856		sctp_send_cookie_ack(stcb);
1857		if (spec_flag) {
1858			/*
1859			 * only if we have retrans set do we do this. What
1860			 * this call does is get only the COOKIE-ACK out and
1861			 * then when we return the normal call to
1862			 * sctp_chunk_output will get the retrans out behind
1863			 * this.
1864			 */
1865			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1866		}
1867		if (how_indx < sizeof(asoc->cookie_how))
1868			asoc->cookie_how[how_indx] = 11;
1869
1870		return (stcb);
1871	}
1872	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1873	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1874	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1875	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1876	    cookie->tie_tag_peer_vtag != 0) {
1877		struct sctpasochead *head;
1878
1879#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1880		struct socket *so;
1881
1882#endif
1883
1884		if (asoc->peer_supports_nat) {
1885			/*
1886			 * This is a gross gross hack. Just call the
1887			 * cookie_new code since we are allowing a duplicate
1888			 * association. I hope this works...
1889			 */
1890			return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
1891			    sh, cookie, cookie_len,
1892			    inp, netp, init_src, notification,
1893			    auth_skipped, auth_offset, auth_len,
1894			    use_mflowid, mflowid,
1895			    vrf_id, port));
1896		}
1897		/*
1898		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1899		 */
1900		/* temp code */
1901		if (how_indx < sizeof(asoc->cookie_how))
1902			asoc->cookie_how[how_indx] = 12;
1903		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1904		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1905
1906		/* notify upper layer */
1907		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1908		atomic_add_int(&stcb->asoc.refcnt, 1);
1909		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1910		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1911		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1912			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1913		}
1914		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1915			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1916		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1917			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1918		}
1919		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1920			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1921			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1922			    stcb->sctp_ep, stcb, asoc->primary_destination);
1923
1924		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1925			/* move to OPEN state, if not in SHUTDOWN_SENT */
1926			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1927		}
1928		asoc->pre_open_streams =
1929		    ntohs(initack_cp->init.num_outbound_streams);
1930		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1931		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1932		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1933
1934		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1935
1936		asoc->str_reset_seq_in = asoc->init_seq_number;
1937
1938		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1939		if (asoc->mapping_array) {
1940			memset(asoc->mapping_array, 0,
1941			    asoc->mapping_array_size);
1942		}
1943		if (asoc->nr_mapping_array) {
1944			memset(asoc->nr_mapping_array, 0,
1945			    asoc->mapping_array_size);
1946		}
1947		SCTP_TCB_UNLOCK(stcb);
1948#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1949		so = SCTP_INP_SO(stcb->sctp_ep);
1950		SCTP_SOCKET_LOCK(so, 1);
1951#endif
1952		SCTP_INP_INFO_WLOCK();
1953		SCTP_INP_WLOCK(stcb->sctp_ep);
1954		SCTP_TCB_LOCK(stcb);
1955		atomic_add_int(&stcb->asoc.refcnt, -1);
1956		/* send up all the data */
1957		SCTP_TCB_SEND_LOCK(stcb);
1958
1959		sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
1960		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1961			stcb->asoc.strmout[i].chunks_on_queues = 0;
1962			stcb->asoc.strmout[i].stream_no = i;
1963			stcb->asoc.strmout[i].next_sequence_send = 0;
1964			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1965		}
1966		/* process the INIT-ACK info (my info) */
1967		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1968		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1969
1970		/* pull from vtag hash */
1971		LIST_REMOVE(stcb, sctp_asocs);
1972		/* re-insert to new vtag position */
1973		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1974		    SCTP_BASE_INFO(hashasocmark))];
1975		/*
1976		 * put it in the bucket in the vtag hash of assoc's for the
1977		 * system
1978		 */
1979		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1980
1981		SCTP_TCB_SEND_UNLOCK(stcb);
1982		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1983		SCTP_INP_INFO_WUNLOCK();
1984#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1985		SCTP_SOCKET_UNLOCK(so, 1);
1986#endif
1987		asoc->total_flight = 0;
1988		asoc->total_flight_count = 0;
1989		/* process the INIT info (peer's info) */
1990		retval = sctp_process_init(init_cp, stcb);
1991		if (retval < 0) {
1992			if (how_indx < sizeof(asoc->cookie_how))
1993				asoc->cookie_how[how_indx] = 13;
1994
1995			return (NULL);
1996		}
1997		/*
1998		 * since we did not send a HB make sure we don't double
1999		 * things
2000		 */
2001		net->hb_responded = 1;
2002
2003		if (sctp_load_addresses_from_init(stcb, m,
2004		    init_offset + sizeof(struct sctp_init_chunk),
2005		    initack_offset, src, dst, init_src)) {
2006			if (how_indx < sizeof(asoc->cookie_how))
2007				asoc->cookie_how[how_indx] = 14;
2008
2009			return (NULL);
2010		}
2011		/* respond with a COOKIE-ACK */
2012		sctp_stop_all_cookie_timers(stcb);
2013		sctp_toss_old_cookies(stcb, asoc);
2014		sctp_send_cookie_ack(stcb);
2015		if (how_indx < sizeof(asoc->cookie_how))
2016			asoc->cookie_how[how_indx] = 15;
2017
2018		return (stcb);
2019	}
2020	if (how_indx < sizeof(asoc->cookie_how))
2021		asoc->cookie_how[how_indx] = 16;
2022	/* all other cases... */
2023	return (NULL);
2024}
2025
2026
2027/*
2028 * handle a state cookie for a new association m: input packet mbuf chain--
2029 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
2030 * and the cookie signature does not exist offset: offset into mbuf to the
2031 * cookie-echo chunk length: length of the cookie chunk to: where the init
2032 * was from returns a new TCB
2033 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		/* not an INIT-ACK where one was expected; drop silently */
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		return (NULL);
	}
	/*
	 * get the correct sctp_nets (netp may be NULL; callers passing
	 * NULL skip net lookup and, below, peer INIT processing)
	 */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
	asoc->scope.site_scope = cookie->site_scope;
	asoc->scope.local_scope = cookie->local_scope;
	asoc->scope.loopback_scope = cookie->loopback_scope;

	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		/*
		 * Hold a temporary reference so the stcb stays valid
		 * through the lock shuffle and free below; the reference
		 * is dropped only after sctp_free_assoc() returns.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * On these platforms the socket lock must be taken before
		 * the TCB lock, so drop and re-acquire in that order
		 * around the free.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb);
	else
		retval = 0;
	if (retval < 0) {
		/* peer INIT rejected: tear down the just-created assoc */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
	    src, dst, init_src)) {
		/* address load failed: tear down the just-created assoc */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		/* a shutdown is already pending; arm the guard timer */
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/*
	 * pull in local_address (our "from" address); initack_src is
	 * passed to sctp_check_address_list() below
	 */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
		break;
#endif
	default:
		/* unknown local address type in cookie -- tear down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* lock shuffle: socket lock before TCB lock, with a ref held */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT (using the timestamp stored in the cookie) */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy,
		    SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}
2390
2391/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension,
 * i.e. we NEED to make sure we are not already using the vtag. If we
 * are, we need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG (no middle
 * box bit!). For example:
2395	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2396							    SCTP_BASE_INFO(hashasocmark))];
2397	LIST_FOREACH(stcb, head, sctp_asocs) {
2398	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2399		       -- SEND ABORT - TRY AGAIN --
2400		}
2401	}
2402*/
2403
2404/*
2405 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2406 * existing (non-NULL) TCB
2407 */
2408static struct mbuf *
2409sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2410    struct sockaddr *src, struct sockaddr *dst,
2411    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2412    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2413    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2414    struct sctp_tcb **locked_tcb,
2415    uint8_t use_mflowid, uint32_t mflowid,
2416    uint32_t vrf_id, uint16_t port)
2417{
2418	struct sctp_state_cookie *cookie;
2419	struct sctp_tcb *l_stcb = *stcb;
2420	struct sctp_inpcb *l_inp;
2421	struct sockaddr *to;
2422	struct sctp_pcb *ep;
2423	struct mbuf *m_sig;
2424	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2425	uint8_t *sig;
2426	uint8_t cookie_ok = 0;
2427	unsigned int sig_offset, cookie_offset;
2428	unsigned int cookie_len;
2429	struct timeval now;
2430	struct timeval time_expires;
2431	int notification = 0;
2432	struct sctp_nets *netl;
2433	int had_a_existing_tcb = 0;
2434	int send_int_conf = 0;
2435
2436#ifdef INET
2437	struct sockaddr_in sin;
2438
2439#endif
2440#ifdef INET6
2441	struct sockaddr_in6 sin6;
2442
2443#endif
2444
2445	SCTPDBG(SCTP_DEBUG_INPUT2,
2446	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2447
2448	if (inp_p == NULL) {
2449		return (NULL);
2450	}
2451	cookie = &cp->cookie;
2452	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2453	cookie_len = ntohs(cp->ch.chunk_length);
2454
2455	if ((cookie->peerport != sh->src_port) &&
2456	    (cookie->myport != sh->dest_port) &&
2457	    (cookie->my_vtag != sh->v_tag)) {
2458		/*
2459		 * invalid ports or bad tag.  Note that we always leave the
2460		 * v_tag in the header in network order and when we stored
2461		 * it in the my_vtag slot we also left it in network order.
2462		 * This maintains the match even though it may be in the
2463		 * opposite byte order of the machine :->
2464		 */
2465		return (NULL);
2466	}
2467	if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2468	    sizeof(struct sctp_init_chunk) +
2469	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2470		/* cookie too small */
2471		return (NULL);
2472	}
2473	/*
2474	 * split off the signature into its own mbuf (since it should not be
2475	 * calculated in the sctp_hmac_m() call).
2476	 */
2477	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2478	m_sig = m_split(m, sig_offset, M_NOWAIT);
2479	if (m_sig == NULL) {
2480		/* out of memory or ?? */
2481		return (NULL);
2482	}
2483#ifdef SCTP_MBUF_LOGGING
2484	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2485		struct mbuf *mat;
2486
2487		for (mat = m_sig; mat; mat = SCTP_BUF_NEXT(mat)) {
2488			if (SCTP_BUF_IS_EXTENDED(mat)) {
2489				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2490			}
2491		}
2492	}
2493#endif
2494
2495	/*
2496	 * compute the signature/digest for the cookie
2497	 */
2498	ep = &(*inp_p)->sctp_ep;
2499	l_inp = *inp_p;
2500	if (l_stcb) {
2501		SCTP_TCB_UNLOCK(l_stcb);
2502	}
2503	SCTP_INP_RLOCK(l_inp);
2504	if (l_stcb) {
2505		SCTP_TCB_LOCK(l_stcb);
2506	}
2507	/* which cookie is it? */
2508	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2509	    (ep->current_secret_number != ep->last_secret_number)) {
2510		/* it's the old cookie */
2511		(void)sctp_hmac_m(SCTP_HMAC,
2512		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2513		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2514	} else {
2515		/* it's the current cookie */
2516		(void)sctp_hmac_m(SCTP_HMAC,
2517		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2518		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2519	}
2520	/* get the signature */
2521	SCTP_INP_RUNLOCK(l_inp);
2522	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2523	if (sig == NULL) {
2524		/* couldn't find signature */
2525		sctp_m_freem(m_sig);
2526		return (NULL);
2527	}
2528	/* compare the received digest with the computed digest */
2529	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2530		/* try the old cookie? */
2531		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2532		    (ep->current_secret_number != ep->last_secret_number)) {
2533			/* compute digest with old */
2534			(void)sctp_hmac_m(SCTP_HMAC,
2535			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2536			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2537			/* compare */
2538			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2539				cookie_ok = 1;
2540		}
2541	} else {
2542		cookie_ok = 1;
2543	}
2544
2545	/*
2546	 * Now before we continue we must reconstruct our mbuf so that
2547	 * normal processing of any other chunks will work.
2548	 */
2549	{
2550		struct mbuf *m_at;
2551
2552		m_at = m;
2553		while (SCTP_BUF_NEXT(m_at) != NULL) {
2554			m_at = SCTP_BUF_NEXT(m_at);
2555		}
2556		SCTP_BUF_NEXT(m_at) = m_sig;
2557	}
2558
2559	if (cookie_ok == 0) {
2560		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2561		SCTPDBG(SCTP_DEBUG_INPUT2,
2562		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2563		    (uint32_t) offset, cookie_offset, sig_offset);
2564		return (NULL);
2565	}
2566	/*
2567	 * check the cookie timestamps to be sure it's not stale
2568	 */
2569	(void)SCTP_GETTIME_TIMEVAL(&now);
2570	/* Expire time is in Ticks, so we convert to seconds */
2571	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2572	time_expires.tv_usec = cookie->time_entered.tv_usec;
2573	/*
2574	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2575	 * is undefined.
2576	 */
2577	if (timevalcmp(&now, &time_expires, >)) {
2578		/* cookie is stale! */
2579		struct mbuf *op_err;
2580		struct sctp_stale_cookie_msg *scm;
2581		uint32_t tim;
2582
2583		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2584		    0, M_NOWAIT, 1, MT_DATA);
2585		if (op_err == NULL) {
2586			/* FOOBAR */
2587			return (NULL);
2588		}
2589		/* Set the len */
2590		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2591		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2592		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2593		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2594		    (sizeof(uint32_t))));
2595		/* seconds to usec */
2596		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2597		/* add in usec */
2598		if (tim == 0)
2599			tim = now.tv_usec - cookie->time_entered.tv_usec;
2600		scm->time_usec = htonl(tim);
2601		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2602		    use_mflowid, mflowid,
2603		    vrf_id, port);
2604		return (NULL);
2605	}
2606	/*
2607	 * Now we must see with the lookup address if we have an existing
2608	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2609	 * and a INIT collided with us and somewhere the peer sent the
2610	 * cookie on another address besides the single address our assoc
2611	 * had for him. In this case we will have one of the tie-tags set at
2612	 * least AND the address field in the cookie can be used to look it
2613	 * up.
2614	 */
2615	to = NULL;
2616	switch (cookie->addr_type) {
2617#ifdef INET6
2618	case SCTP_IPV6_ADDRESS:
2619		memset(&sin6, 0, sizeof(sin6));
2620		sin6.sin6_family = AF_INET6;
2621		sin6.sin6_len = sizeof(sin6);
2622		sin6.sin6_port = sh->src_port;
2623		sin6.sin6_scope_id = cookie->scope_id;
2624		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2625		    sizeof(sin6.sin6_addr.s6_addr));
2626		to = (struct sockaddr *)&sin6;
2627		break;
2628#endif
2629#ifdef INET
2630	case SCTP_IPV4_ADDRESS:
2631		memset(&sin, 0, sizeof(sin));
2632		sin.sin_family = AF_INET;
2633		sin.sin_len = sizeof(sin);
2634		sin.sin_port = sh->src_port;
2635		sin.sin_addr.s_addr = cookie->address[0];
2636		to = (struct sockaddr *)&sin;
2637		break;
2638#endif
2639	default:
2640		/* This should not happen */
2641		return (NULL);
2642	}
2643	if ((*stcb == NULL) && to) {
2644		/* Yep, lets check */
2645		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2646		if (*stcb == NULL) {
2647			/*
2648			 * We should have only got back the same inp. If we
2649			 * got back a different ep we have a problem. The
2650			 * original findep got back l_inp and now
2651			 */
2652			if (l_inp != *inp_p) {
2653				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2654			}
2655		} else {
2656			if (*locked_tcb == NULL) {
2657				/*
2658				 * In this case we found the assoc only
2659				 * after we locked the create lock. This
2660				 * means we are in a colliding case and we
2661				 * must make sure that we unlock the tcb if
2662				 * its one of the cases where we throw away
2663				 * the incoming packets.
2664				 */
2665				*locked_tcb = *stcb;
2666
2667				/*
2668				 * We must also increment the inp ref count
2669				 * since the ref_count flags was set when we
2670				 * did not find the TCB, now we found it
2671				 * which reduces the refcount.. we must
2672				 * raise it back out to balance it all :-)
2673				 */
2674				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2675				if ((*stcb)->sctp_ep != l_inp) {
2676					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2677					    (void *)(*stcb)->sctp_ep, (void *)l_inp);
2678				}
2679			}
2680		}
2681	}
2682	if (to == NULL) {
2683		return (NULL);
2684	}
2685	cookie_len -= SCTP_SIGNATURE_SIZE;
2686	if (*stcb == NULL) {
2687		/* this is the "normal" case... get a new TCB */
2688		*stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2689		    cookie, cookie_len, *inp_p,
2690		    netp, to, &notification,
2691		    auth_skipped, auth_offset, auth_len,
2692		    use_mflowid, mflowid,
2693		    vrf_id, port);
2694	} else {
2695		/* this is abnormal... cookie-echo on existing TCB */
2696		had_a_existing_tcb = 1;
2697		*stcb = sctp_process_cookie_existing(m, iphlen, offset,
2698		    src, dst, sh,
2699		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2700		    &notification, auth_skipped, auth_offset, auth_len,
2701		    use_mflowid, mflowid,
2702		    vrf_id, port);
2703	}
2704
2705	if (*stcb == NULL) {
2706		/* still no TCB... must be bad cookie-echo */
2707		return (NULL);
2708	}
2709	if ((*netp != NULL) && (use_mflowid != 0)) {
2710		(*netp)->flowid = mflowid;
2711#ifdef INVARIANTS
2712		(*netp)->flowidset = 1;
2713#endif
2714	}
2715	/*
2716	 * Ok, we built an association so confirm the address we sent the
2717	 * INIT-ACK to.
2718	 */
2719	netl = sctp_findnet(*stcb, to);
2720	/*
2721	 * This code should in theory NOT run but
2722	 */
2723	if (netl == NULL) {
2724		/* TSNH! Huh, why do I need to add this address here? */
2725		if (sctp_add_remote_addr(*stcb, to, NULL, SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2726			return (NULL);
2727		}
2728		netl = sctp_findnet(*stcb, to);
2729	}
2730	if (netl) {
2731		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2732			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2733			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2734			    netl);
2735			send_int_conf = 1;
2736		}
2737	}
2738	sctp_start_net_timers(*stcb);
2739	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2740		if (!had_a_existing_tcb ||
2741		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2742			/*
2743			 * If we have a NEW cookie or the connect never
2744			 * reached the connected state during collision we
2745			 * must do the TCP accept thing.
2746			 */
2747			struct socket *so, *oso;
2748			struct sctp_inpcb *inp;
2749
2750			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2751				/*
2752				 * For a restart we will keep the same
2753				 * socket, no need to do anything. I THINK!!
2754				 */
2755				sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2756				if (send_int_conf) {
2757					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2758					    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2759				}
2760				return (m);
2761			}
2762			oso = (*inp_p)->sctp_socket;
2763			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2764			SCTP_TCB_UNLOCK((*stcb));
2765			CURVNET_SET(oso->so_vnet);
2766			so = sonewconn(oso, 0
2767			    );
2768			CURVNET_RESTORE();
2769			SCTP_TCB_LOCK((*stcb));
2770			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2771
2772			if (so == NULL) {
2773				struct mbuf *op_err;
2774
2775#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2776				struct socket *pcb_so;
2777
2778#endif
2779				/* Too many sockets */
2780				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2781				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2782				sctp_abort_association(*inp_p, NULL, m, iphlen,
2783				    src, dst, sh, op_err,
2784				    use_mflowid, mflowid,
2785				    vrf_id, port);
2786#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2787				pcb_so = SCTP_INP_SO(*inp_p);
2788				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2789				SCTP_TCB_UNLOCK((*stcb));
2790				SCTP_SOCKET_LOCK(pcb_so, 1);
2791				SCTP_TCB_LOCK((*stcb));
2792				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2793#endif
2794				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2795#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2796				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2797#endif
2798				return (NULL);
2799			}
2800			inp = (struct sctp_inpcb *)so->so_pcb;
2801			SCTP_INP_INCR_REF(inp);
2802			/*
2803			 * We add the unbound flag here so that if we get an
2804			 * soabort() before we get the move_pcb done, we
2805			 * will properly cleanup.
2806			 */
2807			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2808			    SCTP_PCB_FLAGS_CONNECTED |
2809			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2810			    SCTP_PCB_FLAGS_UNBOUND |
2811			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2812			    SCTP_PCB_FLAGS_DONT_WAKE);
2813			inp->sctp_features = (*inp_p)->sctp_features;
2814			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2815			inp->sctp_socket = so;
2816			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2817			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2818			inp->sctp_ecn_enable = (*inp_p)->sctp_ecn_enable;
2819			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2820			inp->sctp_context = (*inp_p)->sctp_context;
2821			inp->local_strreset_support = (*inp_p)->local_strreset_support;
2822			inp->inp_starting_point_for_iterator = NULL;
2823			/*
2824			 * copy in the authentication parameters from the
2825			 * original endpoint
2826			 */
2827			if (inp->sctp_ep.local_hmacs)
2828				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2829			inp->sctp_ep.local_hmacs =
2830			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2831			if (inp->sctp_ep.local_auth_chunks)
2832				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2833			inp->sctp_ep.local_auth_chunks =
2834			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2835
2836			/*
2837			 * Now we must move it from one hash table to
2838			 * another and get the tcb in the right place.
2839			 */
2840
2841			/*
2842			 * This is where the one-2-one socket is put into
2843			 * the accept state waiting for the accept!
2844			 */
2845			if (*stcb) {
2846				(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
2847			}
2848			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2849
2850			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2851			SCTP_TCB_UNLOCK((*stcb));
2852
2853			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2854			    0);
2855			SCTP_TCB_LOCK((*stcb));
2856			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2857
2858
2859			/*
2860			 * now we must check to see if we were aborted while
2861			 * the move was going on and the lock/unlock
2862			 * happened.
2863			 */
2864			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2865				/*
2866				 * yep it was, we leave the assoc attached
2867				 * to the socket since the sctp_inpcb_free()
2868				 * call will send an abort for us.
2869				 */
2870				SCTP_INP_DECR_REF(inp);
2871				return (NULL);
2872			}
2873			SCTP_INP_DECR_REF(inp);
2874			/* Switch over to the new guy */
2875			*inp_p = inp;
2876			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2877			if (send_int_conf) {
2878				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2879				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2880			}
2881			/*
2882			 * Pull it from the incomplete queue and wake the
2883			 * guy
2884			 */
2885#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2886			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2887			SCTP_TCB_UNLOCK((*stcb));
2888			SCTP_SOCKET_LOCK(so, 1);
2889#endif
2890			soisconnected(so);
2891#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2892			SCTP_TCB_LOCK((*stcb));
2893			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2894			SCTP_SOCKET_UNLOCK(so, 1);
2895#endif
2896			return (m);
2897		}
2898	}
2899	if (notification) {
2900		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2901	}
2902	if (send_int_conf) {
2903		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2904		    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2905	}
2906	return (m);
2907}
2908
/*
 * Handle a received COOKIE-ACK chunk: move an association that is in the
 * COOKIE-ECHOED state to OPEN, notify the ULP, take an RTT sample, and
 * (re)start the heartbeat/autoclose/ASCONF machinery as appropriate.
 * The chunk pointer is unused; callers may invoke this without an actual
 * COOKIE-ACK on the wire.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	/* COOKIE-ECHO has been acknowledged; stop all pending cookie timers. */
	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/*
			 * No retransmissions occurred, so time_entered gives
			 * a clean round-trip sample for this net's RTO.
			 */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock ordering: the TCB lock must be dropped before
			 * taking the socket lock, then re-taken; the refcount
			 * bump keeps the TCB alive across the gap.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/*
			 * We don't need to do the asconf thing, nor hb or
			 * autoclose if the socket is closed.
			 */
			goto closed_socket;
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
3018
/*
 * Handle a received ECN-ECHO (ECNE) chunk: identify the destination net
 * the CE-marked TSN was sent on, invoke the pluggable congestion-control
 * ECN handler (at most one real reduction per window/RTT), and always
 * answer with a CWR so the peer stops echoing.  Both the current and the
 * pre-RFC "old" ECNE formats are accepted.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		/* malformed length: silently drop */
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		/* old format has no packet count; assume one marked packet */
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/*
	 * window_data_tsn marks the end of the current send window; cwnd is
	 * reduced at most once until sends pass this TSN.
	 */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.TSN_seq;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
			/* sent_queue is TSN-ordered; past it, stop looking */
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a CWR was possibly lost.
		 * See how old it is, we may have it marked on the actual
		 * net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special CWR
			 * that says hey, we did this a long time ago and
			 * you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how many
			 * marks/packets lost we have had.
			 */
			int cnt = 1;

			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are in-window yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
3141
3142static void
3143sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3144{
3145	/*
3146	 * Here we get a CWR from the peer. We must look in the outqueue and
3147	 * make sure that we have a covered ECNE in the control chunk part.
3148	 * If so remove it.
3149	 */
3150	struct sctp_tmit_chunk *chk;
3151	struct sctp_ecne_chunk *ecne;
3152	int override;
3153	uint32_t cwr_tsn;
3154
3155	cwr_tsn = ntohl(cp->tsn);
3156
3157	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3158	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
3159		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3160			continue;
3161		}
3162		if ((override == 0) && (chk->whoTo != net)) {
3163			/* Must be from the right src unless override is set */
3164			continue;
3165		}
3166		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3167		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3168			/* this covers this ECNE, we can remove it */
3169			stcb->asoc.ecn_echo_cnt_onq--;
3170			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3171			    sctp_next);
3172			if (chk->data) {
3173				sctp_m_freem(chk->data);
3174				chk->data = NULL;
3175			}
3176			stcb->asoc.ctrl_queue_cnt--;
3177			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3178			if (override == 0) {
3179				break;
3180			}
3181		}
3182	}
3183}
3184
/*
 * Handle a received SHUTDOWN-COMPLETE chunk: valid only in the
 * SHUTDOWN-ACK-SENT state, where it finishes the shutdown handshake by
 * notifying the ULP, stopping the SHUTDOWN-ACK timer and freeing the TCB.
 * The chunk pointer itself carries no information and is unused.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
#ifdef INVARIANTS
	/* by this point all data must have been acked and dequeued */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: take the socket lock before freeing the assoc; the
	 * refcount bump keeps the TCB alive while its lock is dropped.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3241
/*
 * Process one chunk descriptor reported inside a PACKET-DROPPED chunk and
 * retransmit (or re-queue) whatever the dropped chunk was.  desc describes
 * the dropped chunk (type, TSN if DATA, first data bytes); net is the
 * destination the report refers to; flg carries the PACKET-DROPPED flags
 * (SCTP_FROM_MIDDLE_BOX, SCTP_BADCRC).  Returns 0 on success, -1 when the
 * echoed data bytes do not match what we have queued (corrupt report).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/* first pass: exploit TSN ordering of sent_queue */
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					/* end-host drop, no CRC issue: don't retransmit */
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					/*
					 * Verify the first payload bytes echoed
					 * by the router match what we sent.
					 */
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/* audit code */
				unsigned int audit;

				/* recount RESEND-marked chunks to keep the counter honest */
				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			/* mark the queued ASCONF (if any) for retransmission */
			struct sctp_tmit_chunk *asconf;

			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			/* mark the queued COOKIE-ECHO (if any) for retransmission */
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3479
3480void
3481sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list)
3482{
3483	uint32_t i;
3484	uint16_t temp;
3485
3486	/*
3487	 * We set things to 0xffff since this is the last delivered sequence
3488	 * and we will be sending in 0 after the reset.
3489	 */
3490
3491	if (number_entries) {
3492		for (i = 0; i < number_entries; i++) {
3493			temp = ntohs(list[i]);
3494			if (temp >= stcb->asoc.streamincnt) {
3495				continue;
3496			}
3497			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3498		}
3499	} else {
3500		list = NULL;
3501		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3502			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3503		}
3504	}
3505	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3506}
3507
3508static void
3509sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list)
3510{
3511	uint32_t i;
3512	uint16_t temp;
3513
3514	if (number_entries > 0) {
3515		for (i = 0; i < number_entries; i++) {
3516			temp = ntohs(list[i]);
3517			if (temp >= stcb->asoc.streamoutcnt) {
3518				/* no such stream */
3519				continue;
3520			}
3521			stcb->asoc.strmout[temp].next_sequence_send = 0;
3522		}
3523	} else {
3524		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3525			stcb->asoc.strmout[i].next_sequence_send = 0;
3526		}
3527	}
3528	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3529}
3530
3531
3532struct sctp_stream_reset_out_request *
3533sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3534{
3535	struct sctp_association *asoc;
3536	struct sctp_chunkhdr *ch;
3537	struct sctp_stream_reset_out_request *r;
3538	struct sctp_tmit_chunk *chk;
3539	int len, clen;
3540
3541	asoc = &stcb->asoc;
3542	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3543		asoc->stream_reset_outstanding = 0;
3544		return (NULL);
3545	}
3546	if (stcb->asoc.str_reset == NULL) {
3547		asoc->stream_reset_outstanding = 0;
3548		return (NULL);
3549	}
3550	chk = stcb->asoc.str_reset;
3551	if (chk->data == NULL) {
3552		return (NULL);
3553	}
3554	if (bchk) {
3555		/* he wants a copy of the chk pointer */
3556		*bchk = chk;
3557	}
3558	clen = chk->send_size;
3559	ch = mtod(chk->data, struct sctp_chunkhdr *);
3560	r = (struct sctp_stream_reset_out_request *)(ch + 1);
3561	if (ntohl(r->request_seq) == seq) {
3562		/* found it */
3563		return (r);
3564	}
3565	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3566	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3567		/* move to the next one, there can only be a max of two */
3568		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3569		if (ntohl(r->request_seq) == seq) {
3570			return (r);
3571		}
3572	}
3573	/* that seq is not here */
3574	return (NULL);
3575}
3576
3577static void
3578sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3579{
3580	struct sctp_association *asoc;
3581	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3582
3583	if (stcb->asoc.str_reset == NULL) {
3584		return;
3585	}
3586	asoc = &stcb->asoc;
3587
3588	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3589	TAILQ_REMOVE(&asoc->control_send_queue,
3590	    chk,
3591	    sctp_next);
3592	if (chk->data) {
3593		sctp_m_freem(chk->data);
3594		chk->data = NULL;
3595	}
3596	asoc->ctrl_queue_cnt--;
3597	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3598	/* sa_ignore NO_NULL_CHK */
3599	stcb->asoc.str_reset = NULL;
3600}
3601
3602
/*
 * Process a stream-reset RESPONSE parameter from the peer that answers
 * one of our outstanding requests.  'seq' is the response sequence
 * number, 'action' the peer's result code, and 'respin' the raw
 * response (only needed for the TSN-reset case; may be NULL otherwise).
 * Returns 1 if handling a synthesized FORWARD-TSN aborted the
 * association, 0 in all other cases.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	uint32_t number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		/* This response answers our current outstanding request. */
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				/* trailing uint16_t entries of the param are the stream list */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else {
					/* any other result code is reported as a failure */
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
					    number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
					    number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
				/* Ok we now may have more streams */
				int num_stream;

				num_stream = stcb->asoc.strm_pending_add_size;
				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
					/* TSNH */
					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
				}
				stcb->asoc.strm_pending_add_size = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* Put the new streams into effect */
					stcb->asoc.streamoutcnt += num_stream;
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
				/* Response to our request that the peer add inbound streams. */
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * Build a synthetic FORWARD-TSN to advance the
					 * cumulative TSN up to the peer's new starting
					 * point, flushing anything queued before it.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						/* association was torn down while processing */
						return (1);
					}
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					/* rebase and clear both TSN mapping arrays */
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					/* reset stream sequences in both directions */
					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_DENIED);
				} else {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_FAILED);
				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}
3740
/*
 * Handle a peer's IN-stream reset request: the peer asks us to reset
 * (some of) OUR outgoing streams.  If the request sequence number is
 * current and nothing is already outstanding, queue an OUT request of
 * our own covering the listed streams; otherwise echo a cached or
 * error result.  The reply is appended to 'chk'.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* current request: shift the action history before deciding */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* feature not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Can't do it, since they exceeded our buffer size  */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/* convert the stream list to host byte order in place */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: replay our previous result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two behind: replay the result before that */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* anything older or newer is a bad sequence number */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
3794
/*
 * Handle a peer's TSN (association) reset request.  On success, both
 * TSN mapping arrays are rebased, all stream sequences are reset in
 * both directions, and our sending TSN is bumped.  Appends the result
 * to 'chk'.  Returns 1 if the synthetic FORWARD-TSN processing aborted
 * the association, 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* current request: shift the action history before deciding */
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			/* feature not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/*
			 * Synthesize a FORWARD-TSN one past our highest seen
			 * TSN to flush everything currently in the map.
			 */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				/* association was torn down while processing */
				return (1);
			}
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			/* rebase and clear both TSN mapping arrays */
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			/* reset stream sequences in both directions */
			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: replay our previous result */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two behind: replay the result before that */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		/* anything older or newer is a bad sequence number */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}
3859
/*
 * Handle a peer's OUT-stream reset request: the peer is resetting its
 * outgoing (our incoming) streams.  If the cumulative TSN has already
 * reached the peer's cutoff we reset the inbound streams immediately;
 * otherwise we queue the request on resetHead until the TSNs arrive.
 * Appends the result to 'chk'.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* feature not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* request did not fit in our buffer */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			/* list entry plus a copy of the stream numbers */
			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			/* report PERFORMED; the reset fires once TSNs catch up */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* anything older or newer is a bad sequence number */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
3935
/*
 * Handle a peer's request to add more INBOUND streams on our side.
 * If the resulting total stays within our advertised maximum, grow
 * the strmin array, migrating any queued data from the old array.
 * Appends the result to 'chk'.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		/* num_stream becomes the proposed new total inbound count */
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			/* feature not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it they ask for to many */
	denied:
			/* also reached via goto when the allocation below fails */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* out of memory: restore the old array and deny */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				/* 0xffff: nothing delivered yet on the new stream */
				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* anything older or newer is a bad sequence number */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}
4020
/*
 * Handle a peer's ADD-IN-STREAMS request (dispatched for
 * SCTP_STR_RESET_ADD_IN_STREAMS by sctp_handle_stream_reset): the peer
 * wants more inbound streams on its side, i.e. more OUTBOUND streams on
 * ours.  If nothing is outstanding and the new total fits in 16 bits,
 * we issue our own add-out-streams request via sctp_send_str_reset_req.
 * Appends the result to 'chk'.
 * NOTE(review): despite the "_add_out_strm" name this handles the
 * peer's add-IN request — confirm against the dispatcher before renaming.
 */
static void
sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint16_t num_stream;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			/* feature not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_outstanding) {
			/* We must reject it we have something pending */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		} else {
			/* Ok, we can do that :-) */
			int mychk;

			/* proposed new total outbound stream count */
			mychk = stcb->asoc.streamoutcnt;
			mychk += num_stream;
			if (mychk < 0x10000) {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
				/* queue our own add-out-streams request */
				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 0, 1, num_stream, 0, 1)) {
					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				}
			} else {
				/* total would overflow the 16-bit stream space */
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
			}
		}
		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		/* anything older or newer is a bad sequence number */
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
4077
/*
 * Top-level handler for a received STREAM RESET chunk.  Walks every
 * parameter bundled in the chunk (copying each through a bounded stack
 * buffer), dispatches to the per-type request/response handlers above,
 * and, if any request parameter was seen, queues the reply chunk built
 * up by those handlers on the control send queue.  Returns nonzero if
 * a handler aborted the association.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
	static int
	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
        struct sctp_chunkhdr *ch_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(ch_req->chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no chunk available; drop silently */
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/* common bail-out: free the reply chunk and its data */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* iterate over each bundled parameter */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		/* first peek just the parameter header */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* copy the parameter body into the bounded stack buffer */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > (int)sizeof(cstore)) {
			/* parameter was larger than our buffer; handlers see a truncated copy */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* association aborted during handling */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				/* association aborted during handling */
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* unknown parameter type: stop processing */
			break;
		}
		/* advance past this parameter (4-byte aligned on the wire) */
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4226
4227/*
4228 * Handle a router or endpoints report of a packet loss, there are two ways
4229 * to handle this, either we get the whole packet and must disect it
4230 * ourselves (possibly with truncation and or corruption) or it is a summary
4231 * from a middle box that did the disectting for us.
4232 */
4233static void
4234sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4235    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4236{
4237	uint32_t bottle_bw, on_queue;
4238	uint16_t trunc_len;
4239	unsigned int chlen;
4240	unsigned int at;
4241	struct sctp_chunk_desc desc;
4242	struct sctp_chunkhdr *ch;
4243
4244	chlen = ntohs(cp->ch.chunk_length);
4245	chlen -= sizeof(struct sctp_pktdrop_chunk);
4246	/* XXX possible chlen underflow */
4247	if (chlen == 0) {
4248		ch = NULL;
4249		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4250			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4251	} else {
4252		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4253		chlen -= sizeof(struct sctphdr);
4254		/* XXX possible chlen underflow */
4255		memset(&desc, 0, sizeof(desc));
4256	}
4257	trunc_len = (uint16_t) ntohs(cp->trunc_len);
4258	if (trunc_len > limit) {
4259		trunc_len = limit;
4260	}
4261	/* now the chunks themselves */
4262	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4263		desc.chunk_type = ch->chunk_type;
4264		/* get amount we need to move */
4265		at = ntohs(ch->chunk_length);
4266		if (at < sizeof(struct sctp_chunkhdr)) {
4267			/* corrupt chunk, maybe at the end? */
4268			SCTP_STAT_INCR(sctps_pdrpcrupt);
4269			break;
4270		}
4271		if (trunc_len == 0) {
4272			/* we are supposed to have all of it */
4273			if (at > chlen) {
4274				/* corrupt skip it */
4275				SCTP_STAT_INCR(sctps_pdrpcrupt);
4276				break;
4277			}
4278		} else {
4279			/* is there enough of it left ? */
4280			if (desc.chunk_type == SCTP_DATA) {
4281				if (chlen < (sizeof(struct sctp_data_chunk) +
4282				    sizeof(desc.data_bytes))) {
4283					break;
4284				}
4285			} else {
4286				if (chlen < sizeof(struct sctp_chunkhdr)) {
4287					break;
4288				}
4289			}
4290		}
4291		if (desc.chunk_type == SCTP_DATA) {
4292			/* can we get out the tsn? */
4293			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4294				SCTP_STAT_INCR(sctps_pdrpmbda);
4295
4296			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4297				/* yep */
4298				struct sctp_data_chunk *dcp;
4299				uint8_t *ddp;
4300				unsigned int iii;
4301
4302				dcp = (struct sctp_data_chunk *)ch;
4303				ddp = (uint8_t *) (dcp + 1);
4304				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4305					desc.data_bytes[iii] = ddp[iii];
4306				}
4307				desc.tsn_ifany = dcp->dp.tsn;
4308			} else {
4309				/* nope we are done. */
4310				SCTP_STAT_INCR(sctps_pdrpnedat);
4311				break;
4312			}
4313		} else {
4314			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4315				SCTP_STAT_INCR(sctps_pdrpmbct);
4316		}
4317
4318		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4319			SCTP_STAT_INCR(sctps_pdrppdbrk);
4320			break;
4321		}
4322		if (SCTP_SIZE32(at) > chlen) {
4323			break;
4324		}
4325		chlen -= SCTP_SIZE32(at);
4326		if (chlen < sizeof(struct sctp_chunkhdr)) {
4327			/* done, none left */
4328			break;
4329		}
4330		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4331	}
4332	/* Now update any rwnd --- possibly */
4333	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4334		/* From a peer, we get a rwnd report */
4335		uint32_t a_rwnd;
4336
4337		SCTP_STAT_INCR(sctps_pdrpfehos);
4338
4339		bottle_bw = ntohl(cp->bottle_bw);
4340		on_queue = ntohl(cp->current_onq);
4341		if (bottle_bw && on_queue) {
4342			/* a rwnd report is in here */
4343			if (bottle_bw > on_queue)
4344				a_rwnd = bottle_bw - on_queue;
4345			else
4346				a_rwnd = 0;
4347
4348			if (a_rwnd == 0)
4349				stcb->asoc.peers_rwnd = 0;
4350			else {
4351				if (a_rwnd > stcb->asoc.total_flight) {
4352					stcb->asoc.peers_rwnd =
4353					    a_rwnd - stcb->asoc.total_flight;
4354				} else {
4355					stcb->asoc.peers_rwnd = 0;
4356				}
4357				if (stcb->asoc.peers_rwnd <
4358				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4359					/* SWS sender side engages */
4360					stcb->asoc.peers_rwnd = 0;
4361				}
4362			}
4363		}
4364	} else {
4365		SCTP_STAT_INCR(sctps_pdrpfmbox);
4366	}
4367
4368	/* now middle boxes in sat networks get a cwnd bump */
4369	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4370	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4371	    (stcb->asoc.sat_network)) {
4372		/*
4373		 * This is debateable but for sat networks it makes sense
4374		 * Note if a T3 timer has went off, we will prohibit any
4375		 * changes to cwnd until we exit the t3 loss recovery.
4376		 */
4377		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4378		    net, cp, &bottle_bw, &on_queue);
4379	}
4380}
4381
4382/*
4383 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4384 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4385 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4386 * length of the complete packet outputs: - length: modified to remaining
4387 * length after control processing - netp: modified to new sctp_nets after
4388 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4389 * bad packet,...) otherwise return the tcb for this packet
4390 */
4391#ifdef __GNUC__
4392__attribute__((noinline))
4393#endif
4394	static struct sctp_tcb *
4395	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4396             struct sockaddr *src, struct sockaddr *dst,
4397             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4398             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4399             uint8_t use_mflowid, uint32_t mflowid,
4400             uint32_t vrf_id, uint16_t port)
4401{
4402	struct sctp_association *asoc;
4403	uint32_t vtag_in;
4404	int num_chunks = 0;	/* number of control chunks processed */
4405	uint32_t chk_length;
4406	int ret;
4407	int abort_no_unlock = 0;
4408	int ecne_seen = 0;
4409
4410	/*
4411	 * How big should this be, and should it be alloc'd? Lets try the
4412	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4413	 * until we get into jumbo grams and such..
4414	 */
4415	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4416	struct sctp_tcb *locked_tcb = stcb;
4417	int got_auth = 0;
4418	uint32_t auth_offset = 0, auth_len = 0;
4419	int auth_skipped = 0;
4420	int asconf_cnt = 0;
4421
4422#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4423	struct socket *so;
4424
4425#endif
4426
4427	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4428	    iphlen, *offset, length, (void *)stcb);
4429
4430	/* validate chunk header length... */
4431	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4432		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4433		    ntohs(ch->chunk_length));
4434		if (locked_tcb) {
4435			SCTP_TCB_UNLOCK(locked_tcb);
4436		}
4437		return (NULL);
4438	}
4439	/*
4440	 * validate the verification tag
4441	 */
4442	vtag_in = ntohl(sh->v_tag);
4443
4444	if (locked_tcb) {
4445		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4446	}
4447	if (ch->chunk_type == SCTP_INITIATION) {
4448		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4449		    ntohs(ch->chunk_length), vtag_in);
4450		if (vtag_in != 0) {
4451			/* protocol error- silently discard... */
4452			SCTP_STAT_INCR(sctps_badvtag);
4453			if (locked_tcb) {
4454				SCTP_TCB_UNLOCK(locked_tcb);
4455			}
4456			return (NULL);
4457		}
4458	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4459		/*
4460		 * If there is no stcb, skip the AUTH chunk and process
4461		 * later after a stcb is found (to validate the lookup was
4462		 * valid.
4463		 */
4464		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4465		    (stcb == NULL) &&
4466		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4467			/* save this chunk for later processing */
4468			auth_skipped = 1;
4469			auth_offset = *offset;
4470			auth_len = ntohs(ch->chunk_length);
4471
4472			/* (temporarily) move past this chunk */
4473			*offset += SCTP_SIZE32(auth_len);
4474			if (*offset >= length) {
4475				/* no more data left in the mbuf chain */
4476				*offset = length;
4477				if (locked_tcb) {
4478					SCTP_TCB_UNLOCK(locked_tcb);
4479				}
4480				return (NULL);
4481			}
4482			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4483			    sizeof(struct sctp_chunkhdr), chunk_buf);
4484		}
4485		if (ch == NULL) {
4486			/* Help */
4487			*offset = length;
4488			if (locked_tcb) {
4489				SCTP_TCB_UNLOCK(locked_tcb);
4490			}
4491			return (NULL);
4492		}
4493		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4494			goto process_control_chunks;
4495		}
4496		/*
4497		 * first check if it's an ASCONF with an unknown src addr we
4498		 * need to look inside to find the association
4499		 */
4500		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4501			struct sctp_chunkhdr *asconf_ch = ch;
4502			uint32_t asconf_offset = 0, asconf_len = 0;
4503
4504			/* inp's refcount may be reduced */
4505			SCTP_INP_INCR_REF(inp);
4506
4507			asconf_offset = *offset;
4508			do {
4509				asconf_len = ntohs(asconf_ch->chunk_length);
4510				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4511					break;
4512				stcb = sctp_findassociation_ep_asconf(m,
4513				    *offset,
4514				    dst,
4515				    sh, &inp, netp, vrf_id);
4516				if (stcb != NULL)
4517					break;
4518				asconf_offset += SCTP_SIZE32(asconf_len);
4519				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4520				    sizeof(struct sctp_chunkhdr), chunk_buf);
4521			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4522			if (stcb == NULL) {
4523				/*
4524				 * reduce inp's refcount if not reduced in
4525				 * sctp_findassociation_ep_asconf().
4526				 */
4527				SCTP_INP_DECR_REF(inp);
4528			} else {
4529				locked_tcb = stcb;
4530			}
4531
4532			/* now go back and verify any auth chunk to be sure */
4533			if (auth_skipped && (stcb != NULL)) {
4534				struct sctp_auth_chunk *auth;
4535
4536				auth = (struct sctp_auth_chunk *)
4537				    sctp_m_getptr(m, auth_offset,
4538				    auth_len, chunk_buf);
4539				got_auth = 1;
4540				auth_skipped = 0;
4541				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4542				    auth_offset)) {
4543					/* auth HMAC failed so dump it */
4544					*offset = length;
4545					if (locked_tcb) {
4546						SCTP_TCB_UNLOCK(locked_tcb);
4547					}
4548					return (NULL);
4549				} else {
4550					/* remaining chunks are HMAC checked */
4551					stcb->asoc.authenticated = 1;
4552				}
4553			}
4554		}
4555		if (stcb == NULL) {
4556			/* no association, so it's out of the blue... */
4557			sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp,
4558			    use_mflowid, mflowid,
4559			    vrf_id, port);
4560			*offset = length;
4561			if (locked_tcb) {
4562				SCTP_TCB_UNLOCK(locked_tcb);
4563			}
4564			return (NULL);
4565		}
4566		asoc = &stcb->asoc;
4567		/* ABORT and SHUTDOWN can use either v_tag... */
4568		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4569		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4570		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4571			if ((vtag_in == asoc->my_vtag) ||
4572			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4573			    (vtag_in == asoc->peer_vtag))) {
4574				/* this is valid */
4575			} else {
4576				/* drop this packet... */
4577				SCTP_STAT_INCR(sctps_badvtag);
4578				if (locked_tcb) {
4579					SCTP_TCB_UNLOCK(locked_tcb);
4580				}
4581				return (NULL);
4582			}
4583		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4584			if (vtag_in != asoc->my_vtag) {
4585				/*
4586				 * this could be a stale SHUTDOWN-ACK or the
4587				 * peer never got the SHUTDOWN-COMPLETE and
4588				 * is still hung; we have started a new asoc
4589				 * but it won't complete until the shutdown
4590				 * is completed
4591				 */
4592				if (locked_tcb) {
4593					SCTP_TCB_UNLOCK(locked_tcb);
4594				}
4595				sctp_handle_ootb(m, iphlen, *offset, src, dst,
4596				    sh, inp,
4597				    use_mflowid, mflowid,
4598				    vrf_id, port);
4599				return (NULL);
4600			}
4601		} else {
4602			/* for all other chunks, vtag must match */
4603			if (vtag_in != asoc->my_vtag) {
4604				/* invalid vtag... */
4605				SCTPDBG(SCTP_DEBUG_INPUT3,
4606				    "invalid vtag: %xh, expect %xh\n",
4607				    vtag_in, asoc->my_vtag);
4608				SCTP_STAT_INCR(sctps_badvtag);
4609				if (locked_tcb) {
4610					SCTP_TCB_UNLOCK(locked_tcb);
4611				}
4612				*offset = length;
4613				return (NULL);
4614			}
4615		}
4616	}			/* end if !SCTP_COOKIE_ECHO */
4617	/*
4618	 * process all control chunks...
4619	 */
4620	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4621	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4622	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4623	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4624		/* implied cookie-ack.. we must have lost the ack */
4625		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4626			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4627			    stcb->asoc.overall_error_count,
4628			    0,
4629			    SCTP_FROM_SCTP_INPUT,
4630			    __LINE__);
4631		}
4632		stcb->asoc.overall_error_count = 0;
4633		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4634		    *netp);
4635	}
4636process_control_chunks:
4637	while (IS_SCTP_CONTROL(ch)) {
4638		/* validate chunk length */
4639		chk_length = ntohs(ch->chunk_length);
4640		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4641		    ch->chunk_type, chk_length);
4642		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4643		if (chk_length < sizeof(*ch) ||
4644		    (*offset + (int)chk_length) > length) {
4645			*offset = length;
4646			if (locked_tcb) {
4647				SCTP_TCB_UNLOCK(locked_tcb);
4648			}
4649			return (NULL);
4650		}
4651		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4652		/*
4653		 * INIT-ACK only gets the init ack "header" portion only
4654		 * because we don't have to process the peer's COOKIE. All
4655		 * others get a complete chunk.
4656		 */
4657		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4658		    (ch->chunk_type == SCTP_INITIATION)) {
4659			/* get an init-ack chunk */
4660			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4661			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4662			if (ch == NULL) {
4663				*offset = length;
4664				if (locked_tcb) {
4665					SCTP_TCB_UNLOCK(locked_tcb);
4666				}
4667				return (NULL);
4668			}
4669		} else {
4670			/* For cookies and all other chunks. */
4671			if (chk_length > sizeof(chunk_buf)) {
4672				/*
4673				 * use just the size of the chunk buffer so
4674				 * the front part of our chunks fit in
4675				 * contiguous space up to the chunk buffer
4676				 * size (508 bytes). For chunks that need to
4677				 * get more than that they must use the
4678				 * sctp_m_getptr() function or other means
4679				 * (e.g. know how to parse mbuf chains).
4680				 * Cookies do this already.
4681				 */
4682				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4683				    (sizeof(chunk_buf) - 4),
4684				    chunk_buf);
4685				if (ch == NULL) {
4686					*offset = length;
4687					if (locked_tcb) {
4688						SCTP_TCB_UNLOCK(locked_tcb);
4689					}
4690					return (NULL);
4691				}
4692			} else {
4693				/* We can fit it all */
4694				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4695				    chk_length, chunk_buf);
4696				if (ch == NULL) {
4697					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4698					*offset = length;
4699					if (locked_tcb) {
4700						SCTP_TCB_UNLOCK(locked_tcb);
4701					}
4702					return (NULL);
4703				}
4704			}
4705		}
4706		num_chunks++;
4707		/* Save off the last place we got a control from */
4708		if (stcb != NULL) {
4709			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4710				/*
4711				 * allow last_control to be NULL if
4712				 * ASCONF... ASCONF processing will find the
4713				 * right net later
4714				 */
4715				if ((netp != NULL) && (*netp != NULL))
4716					stcb->asoc.last_control_chunk_from = *netp;
4717			}
4718		}
4719#ifdef SCTP_AUDITING_ENABLED
4720		sctp_audit_log(0xB0, ch->chunk_type);
4721#endif
4722
4723		/* check to see if this chunk required auth, but isn't */
4724		if ((stcb != NULL) &&
4725		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4726		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4727		    !stcb->asoc.authenticated) {
4728			/* "silently" ignore */
4729			SCTP_STAT_INCR(sctps_recvauthmissing);
4730			goto next_chunk;
4731		}
4732		switch (ch->chunk_type) {
4733		case SCTP_INITIATION:
4734			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4735			/* The INIT chunk must be the only chunk. */
4736			if ((num_chunks > 1) ||
4737			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4738				sctp_abort_association(inp, stcb, m, iphlen,
4739				    src, dst, sh, NULL,
4740				    use_mflowid, mflowid,
4741				    vrf_id, port);
4742				*offset = length;
4743				return (NULL);
4744			}
4745			/* Honor our resource limit. */
4746			if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
4747				struct mbuf *op_err;
4748
4749				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
4750				sctp_abort_association(inp, stcb, m, iphlen,
4751				    src, dst, sh, op_err,
4752				    use_mflowid, mflowid,
4753				    vrf_id, port);
4754				*offset = length;
4755				return (NULL);
4756			}
4757			sctp_handle_init(m, iphlen, *offset, src, dst, sh,
4758			    (struct sctp_init_chunk *)ch, inp,
4759			    stcb, &abort_no_unlock,
4760			    use_mflowid, mflowid,
4761			    vrf_id, port);
4762			*offset = length;
4763			if ((!abort_no_unlock) && (locked_tcb)) {
4764				SCTP_TCB_UNLOCK(locked_tcb);
4765			}
4766			return (NULL);
4767			break;
4768		case SCTP_PAD_CHUNK:
4769			break;
4770		case SCTP_INITIATION_ACK:
4771			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4772			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4773				/* We are not interested anymore */
4774				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4775					;
4776				} else {
4777					if (locked_tcb != stcb) {
4778						/* Very unlikely */
4779						SCTP_TCB_UNLOCK(locked_tcb);
4780					}
4781					*offset = length;
4782					if (stcb) {
4783#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4784						so = SCTP_INP_SO(inp);
4785						atomic_add_int(&stcb->asoc.refcnt, 1);
4786						SCTP_TCB_UNLOCK(stcb);
4787						SCTP_SOCKET_LOCK(so, 1);
4788						SCTP_TCB_LOCK(stcb);
4789						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4790#endif
4791						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4792#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4793						SCTP_SOCKET_UNLOCK(so, 1);
4794#endif
4795					}
4796					return (NULL);
4797				}
4798			}
4799			/* The INIT-ACK chunk must be the only chunk. */
4800			if ((num_chunks > 1) ||
4801			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4802				*offset = length;
4803				if (locked_tcb) {
4804					SCTP_TCB_UNLOCK(locked_tcb);
4805				}
4806				return (NULL);
4807			}
4808			if ((netp) && (*netp)) {
4809				ret = sctp_handle_init_ack(m, iphlen, *offset,
4810				    src, dst, sh,
4811				    (struct sctp_init_ack_chunk *)ch,
4812				    stcb, *netp,
4813				    &abort_no_unlock,
4814				    use_mflowid, mflowid,
4815				    vrf_id);
4816			} else {
4817				ret = -1;
4818			}
4819			*offset = length;
4820			if (abort_no_unlock) {
4821				return (NULL);
4822			}
4823			/*
4824			 * Special case, I must call the output routine to
4825			 * get the cookie echoed
4826			 */
4827			if ((stcb != NULL) && (ret == 0)) {
4828				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4829			}
4830			if (locked_tcb) {
4831				SCTP_TCB_UNLOCK(locked_tcb);
4832			}
4833			return (NULL);
4834			break;
4835		case SCTP_SELECTIVE_ACK:
4836			{
4837				struct sctp_sack_chunk *sack;
4838				int abort_now = 0;
4839				uint32_t a_rwnd, cum_ack;
4840				uint16_t num_seg, num_dup;
4841				uint8_t flags;
4842				int offset_seg, offset_dup;
4843
4844				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4845				SCTP_STAT_INCR(sctps_recvsacks);
4846				if (stcb == NULL) {
4847					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
4848					break;
4849				}
4850				if (chk_length < sizeof(struct sctp_sack_chunk)) {
4851					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4852					break;
4853				}
4854				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4855					/*-
4856					 * If we have sent a shutdown-ack, we will pay no
4857					 * attention to a sack sent in to us since
4858					 * we don't care anymore.
4859					 */
4860					break;
4861				}
4862				sack = (struct sctp_sack_chunk *)ch;
4863				flags = ch->chunk_flags;
4864				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4865				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4866				num_dup = ntohs(sack->sack.num_dup_tsns);
4867				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4868				if (sizeof(struct sctp_sack_chunk) +
4869				    num_seg * sizeof(struct sctp_gap_ack_block) +
4870				    num_dup * sizeof(uint32_t) != chk_length) {
4871					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4872					break;
4873				}
4874				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4875				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4876				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4877				    cum_ack, num_seg, a_rwnd);
4878				stcb->asoc.seen_a_sack_this_pkt = 1;
4879				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4880				    (num_seg == 0) &&
4881				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4882				    (stcb->asoc.saw_sack_with_frags == 0) &&
4883				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4884				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4885				    ) {
4886					/*
4887					 * We have a SIMPLE sack having no
4888					 * prior segments and data on sent
4889					 * queue to be acked.. Use the
4890					 * faster path sack processing. We
4891					 * also allow window update sacks
4892					 * with no missing segments to go
4893					 * this way too.
4894					 */
4895					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen);
4896				} else {
4897					if (netp && *netp)
4898						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
4899						    num_seg, 0, num_dup, &abort_now, flags,
4900						    cum_ack, a_rwnd, ecne_seen);
4901				}
4902				if (abort_now) {
4903					/* ABORT signal from sack processing */
4904					*offset = length;
4905					return (NULL);
4906				}
4907				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4908				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4909				    (stcb->asoc.stream_queue_cnt == 0)) {
4910					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4911				}
4912			}
4913			break;
4914			/*
4915			 * EY - nr_sack:  If the received chunk is an
4916			 * nr_sack chunk
4917			 */
4918		case SCTP_NR_SELECTIVE_ACK:
4919			{
4920				struct sctp_nr_sack_chunk *nr_sack;
4921				int abort_now = 0;
4922				uint32_t a_rwnd, cum_ack;
4923				uint16_t num_seg, num_nr_seg, num_dup;
4924				uint8_t flags;
4925				int offset_seg, offset_dup;
4926
4927				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4928				SCTP_STAT_INCR(sctps_recvsacks);
4929				if (stcb == NULL) {
4930					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
4931					break;
4932				}
4933				if ((stcb->asoc.sctp_nr_sack_on_off == 0) ||
4934				    (stcb->asoc.peer_supports_nr_sack == 0)) {
4935					goto unknown_chunk;
4936				}
4937				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4938					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
4939					break;
4940				}
4941				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4942					/*-
4943					 * If we have sent a shutdown-ack, we will pay no
4944					 * attention to a sack sent in to us since
4945					 * we don't care anymore.
4946					 */
4947					break;
4948				}
4949				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4950				flags = ch->chunk_flags;
4951				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4952				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4953				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4954				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4955				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4956				if (sizeof(struct sctp_nr_sack_chunk) +
4957				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
4958				    num_dup * sizeof(uint32_t) != chk_length) {
4959					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
4960					break;
4961				}
4962				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
4963				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4964				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4965				    cum_ack, num_seg, a_rwnd);
4966				stcb->asoc.seen_a_sack_this_pkt = 1;
4967				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4968				    (num_seg == 0) && (num_nr_seg == 0) &&
4969				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4970				    (stcb->asoc.saw_sack_with_frags == 0) &&
4971				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4972				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
4973					/*
4974					 * We have a SIMPLE sack having no
4975					 * prior segments and data on sent
4976					 * queue to be acked. Use the faster
4977					 * path sack processing. We also
4978					 * allow window update sacks with no
4979					 * missing segments to go this way
4980					 * too.
4981					 */
4982					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
4983					    &abort_now, ecne_seen);
4984				} else {
4985					if (netp && *netp)
4986						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
4987						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
4988						    cum_ack, a_rwnd, ecne_seen);
4989				}
4990				if (abort_now) {
4991					/* ABORT signal from sack processing */
4992					*offset = length;
4993					return (NULL);
4994				}
4995				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4996				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4997				    (stcb->asoc.stream_queue_cnt == 0)) {
4998					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4999				}
5000			}
5001			break;
5002
5003		case SCTP_HEARTBEAT_REQUEST:
5004			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5005			if ((stcb) && netp && *netp) {
5006				SCTP_STAT_INCR(sctps_recvheartbeat);
5007				sctp_send_heartbeat_ack(stcb, m, *offset,
5008				    chk_length, *netp);
5009
5010				/* He's alive so give him credit */
5011				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5012					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5013					    stcb->asoc.overall_error_count,
5014					    0,
5015					    SCTP_FROM_SCTP_INPUT,
5016					    __LINE__);
5017				}
5018				stcb->asoc.overall_error_count = 0;
5019			}
5020			break;
5021		case SCTP_HEARTBEAT_ACK:
5022			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
5023			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5024				/* Its not ours */
5025				*offset = length;
5026				if (locked_tcb) {
5027					SCTP_TCB_UNLOCK(locked_tcb);
5028				}
5029				return (NULL);
5030			}
5031			/* He's alive so give him credit */
5032			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5033				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5034				    stcb->asoc.overall_error_count,
5035				    0,
5036				    SCTP_FROM_SCTP_INPUT,
5037				    __LINE__);
5038			}
5039			stcb->asoc.overall_error_count = 0;
5040			SCTP_STAT_INCR(sctps_recvheartbeatack);
5041			if (netp && *netp)
5042				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5043				    stcb, *netp);
5044			break;
5045		case SCTP_ABORT_ASSOCIATION:
5046			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5047			    (void *)stcb);
5048			if ((stcb) && netp && *netp)
5049				sctp_handle_abort((struct sctp_abort_chunk *)ch,
5050				    stcb, *netp);
5051			*offset = length;
5052			return (NULL);
5053			break;
5054		case SCTP_SHUTDOWN:
5055			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5056			    (void *)stcb);
5057			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5058				*offset = length;
5059				if (locked_tcb) {
5060					SCTP_TCB_UNLOCK(locked_tcb);
5061				}
5062				return (NULL);
5063			}
5064			if (netp && *netp) {
5065				int abort_flag = 0;
5066
5067				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5068				    stcb, *netp, &abort_flag);
5069				if (abort_flag) {
5070					*offset = length;
5071					return (NULL);
5072				}
5073			}
5074			break;
5075		case SCTP_SHUTDOWN_ACK:
5076			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb);
5077			if ((stcb) && (netp) && (*netp))
5078				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5079			*offset = length;
5080			return (NULL);
5081			break;
5082
5083		case SCTP_OPERATION_ERROR:
5084			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
5085			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
5086				*offset = length;
5087				return (NULL);
5088			}
5089			break;
5090		case SCTP_COOKIE_ECHO:
5091			SCTPDBG(SCTP_DEBUG_INPUT3,
5092			    "SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb);
5093			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5094				;
5095			} else {
5096				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5097					/* We are not interested anymore */
5098			abend:
5099					if (stcb) {
5100						SCTP_TCB_UNLOCK(stcb);
5101					}
5102					*offset = length;
5103					return (NULL);
5104				}
5105			}
5106			/*
5107			 * First are we accepting? We do this again here
5108			 * since it is possible that a previous endpoint WAS
5109			 * listening responded to a INIT-ACK and then
5110			 * closed. We opened and bound.. and are now no
5111			 * longer listening.
5112			 */
5113
5114			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
5115				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5116				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5117					struct mbuf *op_err;
5118
5119					op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
5120					sctp_abort_association(inp, stcb, m, iphlen,
5121					    src, dst, sh, op_err,
5122					    use_mflowid, mflowid,
5123					    vrf_id, port);
5124				}
5125				*offset = length;
5126				return (NULL);
5127			} else {
5128				struct mbuf *ret_buf;
5129				struct sctp_inpcb *linp;
5130
5131				if (stcb) {
5132					linp = NULL;
5133				} else {
5134					linp = inp;
5135				}
5136
5137				if (linp) {
5138					SCTP_ASOC_CREATE_LOCK(linp);
5139					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5140					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5141						SCTP_ASOC_CREATE_UNLOCK(linp);
5142						goto abend;
5143					}
5144				}
5145				if (netp) {
5146					ret_buf =
5147					    sctp_handle_cookie_echo(m, iphlen,
5148					    *offset,
5149					    src, dst,
5150					    sh,
5151					    (struct sctp_cookie_echo_chunk *)ch,
5152					    &inp, &stcb, netp,
5153					    auth_skipped,
5154					    auth_offset,
5155					    auth_len,
5156					    &locked_tcb,
5157					    use_mflowid,
5158					    mflowid,
5159					    vrf_id,
5160					    port);
5161				} else {
5162					ret_buf = NULL;
5163				}
5164				if (linp) {
5165					SCTP_ASOC_CREATE_UNLOCK(linp);
5166				}
5167				if (ret_buf == NULL) {
5168					if (locked_tcb) {
5169						SCTP_TCB_UNLOCK(locked_tcb);
5170					}
5171					SCTPDBG(SCTP_DEBUG_INPUT3,
5172					    "GAK, null buffer\n");
5173					*offset = length;
5174					return (NULL);
5175				}
5176				/* if AUTH skipped, see if it verified... */
5177				if (auth_skipped) {
5178					got_auth = 1;
5179					auth_skipped = 0;
5180				}
5181				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
5182					/*
5183					 * Restart the timer if we have
5184					 * pending data
5185					 */
5186					struct sctp_tmit_chunk *chk;
5187
5188					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
5189					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5190				}
5191			}
5192			break;
5193		case SCTP_COOKIE_ACK:
5194			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb);
5195			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5196				if (locked_tcb) {
5197					SCTP_TCB_UNLOCK(locked_tcb);
5198				}
5199				return (NULL);
5200			}
5201			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5202				/* We are not interested anymore */
5203				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5204					;
5205				} else if (stcb) {
5206#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5207					so = SCTP_INP_SO(inp);
5208					atomic_add_int(&stcb->asoc.refcnt, 1);
5209					SCTP_TCB_UNLOCK(stcb);
5210					SCTP_SOCKET_LOCK(so, 1);
5211					SCTP_TCB_LOCK(stcb);
5212					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5213#endif
5214					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
5215#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5216					SCTP_SOCKET_UNLOCK(so, 1);
5217#endif
5218					*offset = length;
5219					return (NULL);
5220				}
5221			}
5222			/* He's alive so give him credit */
5223			if ((stcb) && netp && *netp) {
5224				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5225					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5226					    stcb->asoc.overall_error_count,
5227					    0,
5228					    SCTP_FROM_SCTP_INPUT,
5229					    __LINE__);
5230				}
5231				stcb->asoc.overall_error_count = 0;
5232				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5233			}
5234			break;
5235		case SCTP_ECN_ECHO:
5236			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
5237			/* He's alive so give him credit */
5238			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5239				/* Its not ours */
5240				if (locked_tcb) {
5241					SCTP_TCB_UNLOCK(locked_tcb);
5242				}
5243				*offset = length;
5244				return (NULL);
5245			}
5246			if (stcb) {
5247				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5248					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5249					    stcb->asoc.overall_error_count,
5250					    0,
5251					    SCTP_FROM_SCTP_INPUT,
5252					    __LINE__);
5253				}
5254				stcb->asoc.overall_error_count = 0;
5255				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5256				    stcb);
5257				ecne_seen = 1;
5258			}
5259			break;
5260		case SCTP_ECN_CWR:
5261			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5262			/* He's alive so give him credit */
5263			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5264				/* Its not ours */
5265				if (locked_tcb) {
5266					SCTP_TCB_UNLOCK(locked_tcb);
5267				}
5268				*offset = length;
5269				return (NULL);
5270			}
5271			if (stcb) {
5272				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5273					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5274					    stcb->asoc.overall_error_count,
5275					    0,
5276					    SCTP_FROM_SCTP_INPUT,
5277					    __LINE__);
5278				}
5279				stcb->asoc.overall_error_count = 0;
5280				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5281			}
5282			break;
5283		case SCTP_SHUTDOWN_COMPLETE:
5284			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb);
5285			/* must be first and only chunk */
5286			if ((num_chunks > 1) ||
5287			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5288				*offset = length;
5289				if (locked_tcb) {
5290					SCTP_TCB_UNLOCK(locked_tcb);
5291				}
5292				return (NULL);
5293			}
5294			if ((stcb) && netp && *netp) {
5295				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5296				    stcb, *netp);
5297			}
5298			*offset = length;
5299			return (NULL);
5300			break;
5301		case SCTP_ASCONF:
5302			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5303			/* He's alive so give him credit */
5304			if (stcb) {
5305				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5306					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5307					    stcb->asoc.overall_error_count,
5308					    0,
5309					    SCTP_FROM_SCTP_INPUT,
5310					    __LINE__);
5311				}
5312				stcb->asoc.overall_error_count = 0;
5313				sctp_handle_asconf(m, *offset, src,
5314				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5315				asconf_cnt++;
5316			}
5317			break;
5318		case SCTP_ASCONF_ACK:
5319			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5320			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5321				/* Its not ours */
5322				if (locked_tcb) {
5323					SCTP_TCB_UNLOCK(locked_tcb);
5324				}
5325				*offset = length;
5326				return (NULL);
5327			}
5328			if ((stcb) && netp && *netp) {
5329				/* He's alive so give him credit */
5330				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5331					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5332					    stcb->asoc.overall_error_count,
5333					    0,
5334					    SCTP_FROM_SCTP_INPUT,
5335					    __LINE__);
5336				}
5337				stcb->asoc.overall_error_count = 0;
5338				sctp_handle_asconf_ack(m, *offset,
5339				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5340				if (abort_no_unlock)
5341					return (NULL);
5342			}
5343			break;
5344		case SCTP_FORWARD_CUM_TSN:
5345			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5346			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5347				/* Its not ours */
5348				if (locked_tcb) {
5349					SCTP_TCB_UNLOCK(locked_tcb);
5350				}
5351				*offset = length;
5352				return (NULL);
5353			}
5354			/* He's alive so give him credit */
5355			if (stcb) {
5356				int abort_flag = 0;
5357
5358				stcb->asoc.overall_error_count = 0;
5359				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5360					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5361					    stcb->asoc.overall_error_count,
5362					    0,
5363					    SCTP_FROM_SCTP_INPUT,
5364					    __LINE__);
5365				}
5366				*fwd_tsn_seen = 1;
5367				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5368					/* We are not interested anymore */
5369#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5370					so = SCTP_INP_SO(inp);
5371					atomic_add_int(&stcb->asoc.refcnt, 1);
5372					SCTP_TCB_UNLOCK(stcb);
5373					SCTP_SOCKET_LOCK(so, 1);
5374					SCTP_TCB_LOCK(stcb);
5375					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5376#endif
5377					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5378#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5379					SCTP_SOCKET_UNLOCK(so, 1);
5380#endif
5381					*offset = length;
5382					return (NULL);
5383				}
5384				sctp_handle_forward_tsn(stcb,
5385				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5386				if (abort_flag) {
5387					*offset = length;
5388					return (NULL);
5389				} else {
5390					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5391						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5392						    stcb->asoc.overall_error_count,
5393						    0,
5394						    SCTP_FROM_SCTP_INPUT,
5395						    __LINE__);
5396					}
5397					stcb->asoc.overall_error_count = 0;
5398				}
5399
5400			}
5401			break;
5402		case SCTP_STREAM_RESET:
5403			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5404			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5405				/* Its not ours */
5406				if (locked_tcb) {
5407					SCTP_TCB_UNLOCK(locked_tcb);
5408				}
5409				*offset = length;
5410				return (NULL);
5411			}
5412			if (stcb->asoc.peer_supports_strreset == 0) {
5413				/*
5414				 * hmm, peer should have announced this, but
5415				 * we will turn it on since he is sending us
5416				 * a stream reset.
5417				 */
5418				stcb->asoc.peer_supports_strreset = 1;
5419			}
5420			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5421				/* stop processing */
5422				*offset = length;
5423				return (NULL);
5424			}
5425			break;
5426		case SCTP_PACKET_DROPPED:
5427			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5428			/* re-get it all please */
5429			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5430				/* Its not ours */
5431				if (locked_tcb) {
5432					SCTP_TCB_UNLOCK(locked_tcb);
5433				}
5434				*offset = length;
5435				return (NULL);
5436			}
5437			if (ch && (stcb) && netp && (*netp)) {
5438				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5439				    stcb, *netp,
5440				    min(chk_length, (sizeof(chunk_buf) - 4)));
5441
5442			}
5443			break;
5444
5445		case SCTP_AUTHENTICATION:
5446			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5447			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5448				goto unknown_chunk;
5449
5450			if (stcb == NULL) {
5451				/* save the first AUTH for later processing */
5452				if (auth_skipped == 0) {
5453					auth_offset = *offset;
5454					auth_len = chk_length;
5455					auth_skipped = 1;
5456				}
5457				/* skip this chunk (temporarily) */
5458				goto next_chunk;
5459			}
5460			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5461			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5462			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5463				/* Its not ours */
5464				if (locked_tcb) {
5465					SCTP_TCB_UNLOCK(locked_tcb);
5466				}
5467				*offset = length;
5468				return (NULL);
5469			}
5470			if (got_auth == 1) {
5471				/* skip this chunk... it's already auth'd */
5472				goto next_chunk;
5473			}
5474			got_auth = 1;
5475			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5476			    m, *offset)) {
5477				/* auth HMAC failed so dump the packet */
5478				*offset = length;
5479				return (stcb);
5480			} else {
5481				/* remaining chunks are HMAC checked */
5482				stcb->asoc.authenticated = 1;
5483			}
5484			break;
5485
5486		default:
5487	unknown_chunk:
5488			/* it's an unknown chunk! */
5489			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5490				struct mbuf *mm;
5491				struct sctp_paramhdr *phd;
5492
5493				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5494				    0, M_NOWAIT, 1, MT_DATA);
5495				if (mm) {
5496					phd = mtod(mm, struct sctp_paramhdr *);
5497					/*
5498					 * We cheat and use param type since
5499					 * we did not bother to define a
5500					 * error cause struct. They are the
5501					 * same basic format with different
5502					 * names.
5503					 */
5504					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5505					phd->param_length = htons(chk_length + sizeof(*phd));
5506					SCTP_BUF_LEN(mm) = sizeof(*phd);
5507					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
5508					if (SCTP_BUF_NEXT(mm)) {
5509						if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(mm), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
5510							sctp_m_freem(mm);
5511						} else {
5512#ifdef SCTP_MBUF_LOGGING
5513							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5514								struct mbuf *mat;
5515
5516								for (mat = SCTP_BUF_NEXT(mm); mat; mat = SCTP_BUF_NEXT(mat)) {
5517									if (SCTP_BUF_IS_EXTENDED(mat)) {
5518										sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5519									}
5520								}
5521							}
5522#endif
5523							sctp_queue_op_err(stcb, mm);
5524						}
5525					} else {
5526						sctp_m_freem(mm);
5527					}
5528				}
5529			}
5530			if ((ch->chunk_type & 0x80) == 0) {
5531				/* discard this packet */
5532				*offset = length;
5533				return (stcb);
5534			}	/* else skip this bad chunk and continue... */
5535			break;
5536		}		/* switch (ch->chunk_type) */
5537
5538
5539next_chunk:
5540		/* get the next chunk */
5541		*offset += SCTP_SIZE32(chk_length);
5542		if (*offset >= length) {
5543			/* no more data left in the mbuf chain */
5544			break;
5545		}
5546		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5547		    sizeof(struct sctp_chunkhdr), chunk_buf);
5548		if (ch == NULL) {
5549			if (locked_tcb) {
5550				SCTP_TCB_UNLOCK(locked_tcb);
5551			}
5552			*offset = length;
5553			return (NULL);
5554		}
5555	}			/* while */
5556
5557	if (asconf_cnt > 0 && stcb != NULL) {
5558		sctp_send_asconf_ack(stcb);
5559	}
5560	return (stcb);
5561}
5562
5563
#ifdef INVARIANTS
#ifdef __GNUC__
__attribute__((noinline))
#endif
void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb;

	/*
	 * Debug-only (INVARIANTS) sanity check: after input processing has
	 * returned, no association (stcb) lock and neither of the endpoint's
	 * create/main locks may still be held by this thread.  Any lock left
	 * owned here is a leak in the input path, so panic immediately.
	 */
	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&stcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
	if (mtx_owned(&inp->inp_create_mtx)) {
		panic("Own create lock on inp");
	}
	if (mtx_owned(&inp->inp_mtx)) {
		panic("Own inp lock on inp");
	}
}

#endif
5587
5588/*
5589 * common input chunk processing (v4 and v6)
5590 */
5591void
5592sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
5593    struct sockaddr *src, struct sockaddr *dst,
5594    struct sctphdr *sh, struct sctp_chunkhdr *ch,
5595#if !defined(SCTP_WITH_NO_CSUM)
5596    uint8_t compute_crc,
5597#endif
5598    uint8_t ecn_bits,
5599    uint8_t use_mflowid, uint32_t mflowid,
5600    uint32_t vrf_id, uint16_t port)
5601{
5602	uint32_t high_tsn;
5603	int fwd_tsn_seen = 0, data_processed = 0;
5604	struct mbuf *m = *mm;
5605	int un_sent;
5606	int cnt_ctrl_ready = 0;
5607	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
5608	struct sctp_tcb *stcb = NULL;
5609	struct sctp_nets *net = NULL;
5610
5611	SCTP_STAT_INCR(sctps_recvdatagrams);
5612#ifdef SCTP_AUDITING_ENABLED
5613	sctp_audit_log(0xE0, 1);
5614	sctp_auditing(0, inp, stcb, net);
5615#endif
5616#if !defined(SCTP_WITH_NO_CSUM)
5617	if (compute_crc != 0) {
5618		uint32_t check, calc_check;
5619
5620		check = sh->checksum;
5621		sh->checksum = 0;
5622		calc_check = sctp_calculate_cksum(m, iphlen);
5623		sh->checksum = check;
5624		if (calc_check != check) {
5625			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5626			    calc_check, check, (void *)m, length, iphlen);
5627			stcb = sctp_findassociation_addr(m, offset, src, dst,
5628			    sh, ch, &inp, &net, vrf_id);
5629			if ((net != NULL) && (port != 0)) {
5630				if (net->port == 0) {
5631					sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5632				}
5633				net->port = port;
5634			}
5635			if ((net != NULL) && (use_mflowid != 0)) {
5636				net->flowid = mflowid;
5637#ifdef INVARIANTS
5638				net->flowidset = 1;
5639#endif
5640			}
5641			if ((inp != NULL) && (stcb != NULL)) {
5642				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
5643				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5644			} else if ((inp != NULL) && (stcb == NULL)) {
5645				inp_decr = inp;
5646			}
5647			SCTP_STAT_INCR(sctps_badsum);
5648			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5649			goto out;
5650		}
5651	}
5652#endif
5653	/* Destination port of 0 is illegal, based on RFC4960. */
5654	if (sh->dest_port == 0) {
5655		SCTP_STAT_INCR(sctps_hdrops);
5656		goto out;
5657	}
5658	stcb = sctp_findassociation_addr(m, offset, src, dst,
5659	    sh, ch, &inp, &net, vrf_id);
5660	if ((net != NULL) && (port != 0)) {
5661		if (net->port == 0) {
5662			sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5663		}
5664		net->port = port;
5665	}
5666	if ((net != NULL) && (use_mflowid != 0)) {
5667		net->flowid = mflowid;
5668#ifdef INVARIANTS
5669		net->flowidset = 1;
5670#endif
5671	}
5672	if (inp == NULL) {
5673		SCTP_STAT_INCR(sctps_noport);
5674		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
5675			goto out;
5676		}
5677		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5678			sctp_send_shutdown_complete2(src, dst, sh,
5679			    use_mflowid, mflowid,
5680			    vrf_id, port);
5681			goto out;
5682		}
5683		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5684			goto out;
5685		}
5686		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
5687			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
5688			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
5689			    (ch->chunk_type != SCTP_INIT))) {
5690				sctp_send_abort(m, iphlen, src, dst,
5691				    sh, 0, NULL,
5692				    use_mflowid, mflowid,
5693				    vrf_id, port);
5694			}
5695		}
5696		goto out;
5697	} else if (stcb == NULL) {
5698		inp_decr = inp;
5699	}
5700#ifdef IPSEC
5701	/*-
5702	 * I very much doubt any of the IPSEC stuff will work but I have no
5703	 * idea, so I will leave it in place.
5704	 */
5705	if (inp != NULL) {
5706		switch (dst->sa_family) {
5707#ifdef INET
5708		case AF_INET:
5709			if (ipsec4_in_reject(m, &inp->ip_inp.inp)) {
5710				MODULE_GLOBAL(ipsec4stat).in_polvio++;
5711				SCTP_STAT_INCR(sctps_hdrops);
5712				goto out;
5713			}
5714			break;
5715#endif
5716#ifdef INET6
5717		case AF_INET6:
5718			if (ipsec6_in_reject(m, &inp->ip_inp.inp)) {
5719				MODULE_GLOBAL(ipsec6stat).in_polvio++;
5720				SCTP_STAT_INCR(sctps_hdrops);
5721				goto out;
5722			}
5723			break;
5724#endif
5725		default:
5726			break;
5727		}
5728	}
5729#endif
5730	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5731	    (void *)m, iphlen, offset, length, (void *)stcb);
5732	if (stcb) {
5733		/* always clear this before beginning a packet */
5734		stcb->asoc.authenticated = 0;
5735		stcb->asoc.seen_a_sack_this_pkt = 0;
5736		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5737		    (void *)stcb, stcb->asoc.state);
5738
5739		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5740		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5741			/*-
5742			 * If we hit here, we had a ref count
5743			 * up when the assoc was aborted and the
5744			 * timer is clearing out the assoc, we should
5745			 * NOT respond to any packet.. its OOTB.
5746			 */
5747			SCTP_TCB_UNLOCK(stcb);
5748			stcb = NULL;
5749			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp,
5750			    use_mflowid, mflowid,
5751			    vrf_id, port);
5752			goto out;
5753		}
5754	}
5755	if (IS_SCTP_CONTROL(ch)) {
5756		/* process the control portion of the SCTP packet */
5757		/* sa_ignore NO_NULL_CHK */
5758		stcb = sctp_process_control(m, iphlen, &offset, length,
5759		    src, dst, sh, ch,
5760		    inp, stcb, &net, &fwd_tsn_seen,
5761		    use_mflowid, mflowid,
5762		    vrf_id, port);
5763		if (stcb) {
5764			/*
5765			 * This covers us if the cookie-echo was there and
5766			 * it changes our INP.
5767			 */
5768			inp = stcb->sctp_ep;
5769			if ((net) && (port)) {
5770				if (net->port == 0) {
5771					sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
5772				}
5773				net->port = port;
5774			}
5775		}
5776	} else {
5777		/*
5778		 * no control chunks, so pre-process DATA chunks (these
5779		 * checks are taken care of by control processing)
5780		 */
5781
5782		/*
5783		 * if DATA only packet, and auth is required, then punt...
5784		 * can't have authenticated without any AUTH (control)
5785		 * chunks
5786		 */
5787		if ((stcb != NULL) &&
5788		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5789		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5790			/* "silently" ignore */
5791			SCTP_STAT_INCR(sctps_recvauthmissing);
5792			goto out;
5793		}
5794		if (stcb == NULL) {
5795			/* out of the blue DATA chunk */
5796			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp,
5797			    use_mflowid, mflowid,
5798			    vrf_id, port);
5799			goto out;
5800		}
5801		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5802			/* v_tag mismatch! */
5803			SCTP_STAT_INCR(sctps_badvtag);
5804			goto out;
5805		}
5806	}
5807
5808	if (stcb == NULL) {
5809		/*
5810		 * no valid TCB for this packet, or we found it's a bad
5811		 * packet while processing control, or we're done with this
5812		 * packet (done or skip rest of data), so we drop it...
5813		 */
5814		goto out;
5815	}
5816	/*
5817	 * DATA chunk processing
5818	 */
5819	/* plow through the data chunks while length > offset */
5820
5821	/*
5822	 * Rest should be DATA only.  Check authentication state if AUTH for
5823	 * DATA is required.
5824	 */
5825	if ((length > offset) &&
5826	    (stcb != NULL) &&
5827	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5828	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5829	    !stcb->asoc.authenticated) {
5830		/* "silently" ignore */
5831		SCTP_STAT_INCR(sctps_recvauthmissing);
5832		SCTPDBG(SCTP_DEBUG_AUTH1,
5833		    "Data chunk requires AUTH, skipped\n");
5834		goto trigger_send;
5835	}
5836	if (length > offset) {
5837		int retval;
5838
5839		/*
5840		 * First check to make sure our state is correct. We would
5841		 * not get here unless we really did have a tag, so we don't
5842		 * abort if this happens, just dump the chunk silently.
5843		 */
5844		switch (SCTP_GET_STATE(&stcb->asoc)) {
5845		case SCTP_STATE_COOKIE_ECHOED:
5846			/*
5847			 * we consider data with valid tags in this state
5848			 * shows us the cookie-ack was lost. Imply it was
5849			 * there.
5850			 */
5851			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5852				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5853				    stcb->asoc.overall_error_count,
5854				    0,
5855				    SCTP_FROM_SCTP_INPUT,
5856				    __LINE__);
5857			}
5858			stcb->asoc.overall_error_count = 0;
5859			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5860			break;
5861		case SCTP_STATE_COOKIE_WAIT:
5862			/*
5863			 * We consider OOTB any data sent during asoc setup.
5864			 */
5865			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp,
5866			    use_mflowid, mflowid,
5867			    vrf_id, port);
5868			goto out;
5869			/* sa_ignore NOTREACHED */
5870			break;
5871		case SCTP_STATE_EMPTY:	/* should not happen */
5872		case SCTP_STATE_INUSE:	/* should not happen */
5873		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5874		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5875		default:
5876			goto out;
5877			/* sa_ignore NOTREACHED */
5878			break;
5879		case SCTP_STATE_OPEN:
5880		case SCTP_STATE_SHUTDOWN_SENT:
5881			break;
5882		}
5883		/* plow through the data chunks while length > offset */
5884		retval = sctp_process_data(mm, iphlen, &offset, length,
5885		    src, dst, sh,
5886		    inp, stcb, net, &high_tsn,
5887		    use_mflowid, mflowid,
5888		    vrf_id, port);
5889		if (retval == 2) {
5890			/*
5891			 * The association aborted, NO UNLOCK needed since
5892			 * the association is destroyed.
5893			 */
5894			stcb = NULL;
5895			goto out;
5896		}
5897		data_processed = 1;
5898		/*
5899		 * Anything important needs to have been m_copy'ed in
5900		 * process_data
5901		 */
5902	}
5903	/* take care of ecn */
5904	if ((data_processed == 1) &&
5905	    (stcb->asoc.ecn_allowed == 1) &&
5906	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
5907		/* Yep, we need to add a ECNE */
5908		sctp_send_ecn_echo(stcb, net, high_tsn);
5909	}
5910	if ((data_processed == 0) && (fwd_tsn_seen)) {
5911		int was_a_gap;
5912		uint32_t highest_tsn;
5913
5914		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
5915			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
5916		} else {
5917			highest_tsn = stcb->asoc.highest_tsn_inside_map;
5918		}
5919		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
5920		stcb->asoc.send_sack = 1;
5921		sctp_sack_check(stcb, was_a_gap);
5922	} else if (fwd_tsn_seen) {
5923		stcb->asoc.send_sack = 1;
5924	}
5925	/* trigger send of any chunks in queue... */
5926trigger_send:
5927#ifdef SCTP_AUDITING_ENABLED
5928	sctp_audit_log(0xE0, 2);
5929	sctp_auditing(1, inp, stcb, net);
5930#endif
5931	SCTPDBG(SCTP_DEBUG_INPUT1,
5932	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5933	    stcb->asoc.peers_rwnd,
5934	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5935	    stcb->asoc.total_flight);
5936	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5937	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
5938		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
5939	}
5940	if (cnt_ctrl_ready ||
5941	    ((un_sent) &&
5942	    (stcb->asoc.peers_rwnd > 0 ||
5943	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5944		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5945		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5946		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5947	}
5948#ifdef SCTP_AUDITING_ENABLED
5949	sctp_audit_log(0xE0, 3);
5950	sctp_auditing(2, inp, stcb, net);
5951#endif
5952out:
5953	if (stcb != NULL) {
5954		SCTP_TCB_UNLOCK(stcb);
5955	}
5956	if (inp_decr != NULL) {
5957		/* reduce ref-count */
5958		SCTP_INP_WLOCK(inp_decr);
5959		SCTP_INP_DECR_REF(inp_decr);
5960		SCTP_INP_WUNLOCK(inp_decr);
5961	}
5962#ifdef INVARIANTS
5963	if (inp != NULL) {
5964		sctp_validate_no_locks(inp);
5965	}
5966#endif
5967	return;
5968}
5969
#if 0
/*
 * Debug helper (currently compiled out): print the length of every mbuf
 * in a chain, plus the external-storage size of any cluster mbufs.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	struct mbuf *cur;

	cur = m;
	while (cur != NULL) {
		SCTP_PRINTF("%p: m_len = %ld\n", (void *)cur, SCTP_BUF_LEN(cur));
		if (SCTP_BUF_IS_EXTENDED(cur)) {
			SCTP_PRINTF("%p: extend_size = %d\n", (void *)cur, SCTP_BUF_EXTEND_SIZE(cur));
		}
		cur = SCTP_BUF_NEXT(cur);
	}
}

#endif
5982
5983#ifdef INET
/*
 * IPv4 input entry point (optionally UDP-encapsulated when port != 0).
 *
 * Pulls the IP header, SCTP common header, and first chunk header into the
 * first mbuf, builds sockaddr_in src/dst from the IP header, validates the
 * packet length, rejects multicast/broadcast destinations, decides whether
 * the checksum must be verified in software, and hands the packet to
 * sctp_common_input_processing().  Consumes the packet in all cases.
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;

#if !defined(SCTP_WITH_NO_CSUM)
	uint8_t compute_crc;

#endif
	uint32_t mflowid;
	uint8_t use_mflowid;

	iphlen = off;
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
		}
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    m->m_pkthdr.csum_flags);
	/* Pick up the flow id the lower layer computed, if present. */
	if (m->m_flags & M_FLOWID) {
		mflowid = m->m_pkthdr.flowid;
		use_mflowid = 1;
	} else {
		mflowid = 0;
		use_mflowid = 0;
	}
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		/* m_pullup may replace the chain head; NULL means it freed m */
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	/* offset now points at the first chunk header */
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
	length = ntohs(ip->ip_len);
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
	/* Skip the software checksum when the NIC already validated it. */
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
#endif
	sctp_common_input_processing(&m, iphlen, offset, length,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst,
	    sh, ch,
#if !defined(SCTP_WITH_NO_CSUM)
	    compute_crc,
#endif
	    ecn_bits,
	    use_mflowid, mflowid,
	    vrf_id, port);
out:
	/* common_input_processing may have consumed/replaced m */
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
6106
6107#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
6108extern int *sctp_cpuarry;
6109
6110#endif
6111
/*
 * Protocol-switch input entry for plain (non-UDP-encapsulated) SCTP/IPv4.
 *
 * With multi-core input enabled (SCTP_MCORE_INPUT + SMP) and more than one
 * CPU, the packet is queued to a worker core selected by flow id so that
 * all packets of one association are processed on the same core; a flow id
 * is synthesized from v_tag and the port pair if the lower layer supplied
 * none.  Otherwise the packet is processed inline with encapsulation
 * port 0.
 */
void
sctp_input(struct mbuf *m, int off)
{
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	struct ip *ip;
	struct sctphdr *sh;
	int offset;
	int cpu_to_use;
	uint32_t flowid, tag;

	if (mp_ncpus > 1) {
		if (m->m_flags & M_FLOWID) {
			flowid = m->m_pkthdr.flowid;
		} else {
			/*
			 * No flow id built by lower layers fix it so we
			 * create one.
			 */
			/* Need the common header contiguous to read v_tag. */
			offset = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
					return;
				}
			}
			ip = mtod(m, struct ip *);
			sh = (struct sctphdr *)((caddr_t)ip + off);
			tag = htonl(sh->v_tag);
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			/* Cache the flow id so later layers can reuse it. */
			m->m_pkthdr.flowid = flowid;
			m->m_flags |= M_FLOWID;
		}
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
		return;
	}
#endif
	sctp_input_with_port(m, off, 0);
}
6151
6152#endif
6153