sctp_input.c revision 180955
1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 180955 2008-07-29 09:06:35Z rrs $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/udp.h>
50
51
52
53static void
54sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
55{
56	struct sctp_nets *net;
57
58	/*
59	 * This now not only stops all cookie timers it also stops any INIT
60	 * timers as well. This will make sure that the timers are stopped
61	 * in all collision cases.
62	 */
63	SCTP_TCB_LOCK_ASSERT(stcb);
64	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
65		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
66			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
67			    stcb->sctp_ep,
68			    stcb,
69			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
70		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
71			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
72			    stcb->sctp_ep,
73			    stcb,
74			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
75		}
76	}
77}
78
79/* INIT handler */
80static void
81sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
82    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
83    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
84{
85	struct sctp_init *init;
86	struct mbuf *op_err;
87	uint32_t init_limit;
88
89	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
90	    stcb);
91	if (stcb == NULL) {
92		SCTP_INP_RLOCK(inp);
93		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
94			goto outnow;
95		}
96	}
97	op_err = NULL;
98	init = &cp->init;
99	/* First are we accepting? */
100	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
101		SCTPDBG(SCTP_DEBUG_INPUT2,
102		    "sctp_handle_init: Abort, so_qlimit:%d\n",
103		    inp->sctp_socket->so_qlimit);
104		/*
105		 * FIX ME ?? What about TCP model and we have a
106		 * match/restart case? Actually no fix is needed. the lookup
107		 * will always find the existing assoc so stcb would not be
108		 * NULL. It may be questionable to do this since we COULD
109		 * just send back the INIT-ACK and hope that the app did
110		 * accept()'s by the time the COOKIE was sent. But there is
111		 * a price to pay for COOKIE generation and I don't want to
112		 * pay it on the chance that the app will actually do some
113		 * accepts(). The App just looses and should NOT be in this
114		 * state :-)
115		 */
116		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
117		    vrf_id, port);
118		if (stcb)
119			*abort_no_unlock = 1;
120		goto outnow;
121	}
122	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
123		/* Invalid length */
124		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
125		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
126		    vrf_id, port);
127		if (stcb)
128			*abort_no_unlock = 1;
129		goto outnow;
130	}
131	/* validate parameters */
132	if (init->initiate_tag == 0) {
133		/* protocol error... send abort */
134		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
135		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
136		    vrf_id, port);
137		if (stcb)
138			*abort_no_unlock = 1;
139		goto outnow;
140	}
141	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
142		/* invalid parameter... send abort */
143		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
144		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
145		    vrf_id, port);
146		if (stcb)
147			*abort_no_unlock = 1;
148		goto outnow;
149	}
150	if (init->num_inbound_streams == 0) {
151		/* protocol error... send abort */
152		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
153		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
154		    vrf_id, port);
155		if (stcb)
156			*abort_no_unlock = 1;
157		goto outnow;
158	}
159	if (init->num_outbound_streams == 0) {
160		/* protocol error... send abort */
161		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
162		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
163		    vrf_id, port);
164		if (stcb)
165			*abort_no_unlock = 1;
166		goto outnow;
167	}
168	init_limit = offset + ntohs(cp->ch.chunk_length);
169	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
170	    init_limit)) {
171		/* auth parameter(s) error... send abort */
172		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
173		if (stcb)
174			*abort_no_unlock = 1;
175		goto outnow;
176	}
177	/* send an INIT-ACK w/cookie */
178	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
179	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
180	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
181outnow:
182	if (stcb == NULL) {
183		SCTP_INP_RUNLOCK(inp);
184	}
185}
186
187/*
188 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
189 */
190
191int
192sctp_is_there_unsent_data(struct sctp_tcb *stcb)
193{
194	int unsent_data = 0;
195	struct sctp_stream_queue_pending *sp;
196	struct sctp_stream_out *strq;
197	struct sctp_association *asoc;
198
199	/*
200	 * This function returns the number of streams that have true unsent
201	 * data on them. Note that as it looks through it will clean up any
202	 * places that have old data that has been sent but left at top of
203	 * stream queue.
204	 */
205	asoc = &stcb->asoc;
206	SCTP_TCB_SEND_LOCK(stcb);
207	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
208		/* Check to see if some data queued */
209		TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
210	is_there_another:
211			/* sa_ignore FREED_MEMORY */
212			sp = TAILQ_FIRST(&strq->outqueue);
213			if (sp == NULL) {
214				continue;
215			}
216			if ((sp->msg_is_complete) &&
217			    (sp->length == 0) &&
218			    (sp->sender_all_done)) {
219				/*
220				 * We are doing differed cleanup. Last time
221				 * through when we took all the data the
222				 * sender_all_done was not set.
223				 */
224				if (sp->put_last_out == 0) {
225					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
226					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
227					    sp->sender_all_done,
228					    sp->length,
229					    sp->msg_is_complete,
230					    sp->put_last_out);
231				}
232				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
233				TAILQ_REMOVE(&strq->outqueue, sp, next);
234				sctp_free_remote_addr(sp->net);
235				if (sp->data) {
236					sctp_m_freem(sp->data);
237					sp->data = NULL;
238				}
239				sctp_free_a_strmoq(stcb, sp);
240				goto is_there_another;
241			} else {
242				unsent_data++;
243				continue;
244			}
245		}
246	}
247	SCTP_TCB_SEND_UNLOCK(stcb);
248	return (unsent_data);
249}
250
251static int
252sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
253    struct sctp_nets *net)
254{
255	struct sctp_init *init;
256	struct sctp_association *asoc;
257	struct sctp_nets *lnet;
258	unsigned int i;
259
260	init = &cp->init;
261	asoc = &stcb->asoc;
262	/* save off parameters */
263	asoc->peer_vtag = ntohl(init->initiate_tag);
264	asoc->peers_rwnd = ntohl(init->a_rwnd);
265	if (TAILQ_FIRST(&asoc->nets)) {
266		/* update any ssthresh's that may have a default */
267		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
268			lnet->ssthresh = asoc->peers_rwnd;
269
270			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
271				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
272			}
273		}
274	}
275	SCTP_TCB_SEND_LOCK(stcb);
276	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
277		unsigned int newcnt;
278		struct sctp_stream_out *outs;
279		struct sctp_stream_queue_pending *sp;
280
281		/* cut back on number of streams */
282		newcnt = ntohs(init->num_inbound_streams);
283		/* This if is probably not needed but I am cautious */
284		if (asoc->strmout) {
285			/* First make sure no data chunks are trapped */
286			for (i = newcnt; i < asoc->pre_open_streams; i++) {
287				outs = &asoc->strmout[i];
288				sp = TAILQ_FIRST(&outs->outqueue);
289				while (sp) {
290					TAILQ_REMOVE(&outs->outqueue, sp,
291					    next);
292					asoc->stream_queue_cnt--;
293					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
294					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
295					    sp, SCTP_SO_NOT_LOCKED);
296					if (sp->data) {
297						sctp_m_freem(sp->data);
298						sp->data = NULL;
299					}
300					sctp_free_remote_addr(sp->net);
301					sp->net = NULL;
302					/* Free the chunk */
303					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
304					    sp, stcb);
305
306					sctp_free_a_strmoq(stcb, sp);
307					/* sa_ignore FREED_MEMORY */
308					sp = TAILQ_FIRST(&outs->outqueue);
309				}
310			}
311		}
312		/* cut back the count and abandon the upper streams */
313		asoc->pre_open_streams = newcnt;
314	}
315	SCTP_TCB_SEND_UNLOCK(stcb);
316	asoc->streamoutcnt = asoc->pre_open_streams;
317	/* init tsn's */
318	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
319	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
320		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
321	}
322	/* This is the next one we expect */
323	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
324
325	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
326	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
327	asoc->last_echo_tsn = asoc->asconf_seq_in;
328	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
329	/* open the requested streams */
330
331	if (asoc->strmin != NULL) {
332		/* Free the old ones */
333		struct sctp_queued_to_read *ctl;
334
335		for (i = 0; i < asoc->streamincnt; i++) {
336			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
337			while (ctl) {
338				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
339				sctp_free_remote_addr(ctl->whoFrom);
340				ctl->whoFrom = NULL;
341				sctp_m_freem(ctl->data);
342				ctl->data = NULL;
343				sctp_free_a_readq(stcb, ctl);
344				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
345			}
346		}
347		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
348	}
349	asoc->streamincnt = ntohs(init->num_outbound_streams);
350	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
351		asoc->streamincnt = MAX_SCTP_STREAMS;
352	}
353	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
354	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
355	if (asoc->strmin == NULL) {
356		/* we didn't get memory for the streams! */
357		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
358		return (-1);
359	}
360	for (i = 0; i < asoc->streamincnt; i++) {
361		asoc->strmin[i].stream_no = i;
362		asoc->strmin[i].last_sequence_delivered = 0xffff;
363		/*
364		 * U-stream ranges will be set when the cookie is unpacked.
365		 * Or for the INIT sender they are un set (if pr-sctp not
366		 * supported) when the INIT-ACK arrives.
367		 */
368		TAILQ_INIT(&asoc->strmin[i].inqueue);
369		asoc->strmin[i].delivery_started = 0;
370	}
371	/*
372	 * load_address_from_init will put the addresses into the
373	 * association when the COOKIE is processed or the INIT-ACK is
374	 * processed. Both types of COOKIE's existing and new call this
375	 * routine. It will remove addresses that are no longer in the
376	 * association (for the restarting case where addresses are
377	 * removed). Up front when the INIT arrives we will discard it if it
378	 * is a restart and new addresses have been added.
379	 */
380	/* sa_ignore MEMLEAK */
381	return (0);
382}
383
384/*
385 * INIT-ACK message processing/consumption returns value < 0 on error
386 */
387static int
388sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
389    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
390    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
391{
392	struct sctp_association *asoc;
393	struct mbuf *op_err;
394	int retval, abort_flag;
395	uint32_t initack_limit;
396
397	/* First verify that we have no illegal param's */
398	abort_flag = 0;
399	op_err = NULL;
400
401	op_err = sctp_arethere_unrecognized_parameters(m,
402	    (offset + sizeof(struct sctp_init_chunk)),
403	    &abort_flag, (struct sctp_chunkhdr *)cp);
404	if (abort_flag) {
405		/* Send an abort and notify peer */
406		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
407		*abort_no_unlock = 1;
408		return (-1);
409	}
410	asoc = &stcb->asoc;
411	/* process the peer's parameters in the INIT-ACK */
412	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
413	if (retval < 0) {
414		return (retval);
415	}
416	initack_limit = offset + ntohs(cp->ch.chunk_length);
417	/* load all addresses */
418	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
419	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
420	    NULL))) {
421		/* Huh, we should abort */
422		SCTPDBG(SCTP_DEBUG_INPUT1,
423		    "Load addresses from INIT causes an abort %d\n",
424		    retval);
425		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
426		    NULL, 0, net->port);
427		*abort_no_unlock = 1;
428		return (-1);
429	}
430	/* if the peer doesn't support asconf, flush the asconf queue */
431	if (asoc->peer_supports_asconf == 0) {
432		struct sctp_asconf_addr *aparam;
433
434		while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
435			/* sa_ignore FREED_MEMORY */
436			aparam = TAILQ_FIRST(&asoc->asconf_queue);
437			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
438			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
439		}
440	}
441	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
442	    stcb->asoc.local_hmacs);
443	if (op_err) {
444		sctp_queue_op_err(stcb, op_err);
445		/* queuing will steal away the mbuf chain to the out queue */
446		op_err = NULL;
447	}
448	/* extract the cookie and queue it to "echo" it back... */
449	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
450		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
451		    stcb->asoc.overall_error_count,
452		    0,
453		    SCTP_FROM_SCTP_INPUT,
454		    __LINE__);
455	}
456	stcb->asoc.overall_error_count = 0;
457	net->error_count = 0;
458
459	/*
460	 * Cancel the INIT timer, We do this first before queueing the
461	 * cookie. We always cancel at the primary to assue that we are
462	 * canceling the timer started by the INIT which always goes to the
463	 * primary.
464	 */
465	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
466	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
467
468	/* calculate the RTO */
469	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);
470
471	retval = sctp_send_cookie_echo(m, offset, stcb, net);
472	if (retval < 0) {
473		/*
474		 * No cookie, we probably should send a op error. But in any
475		 * case if there is no cookie in the INIT-ACK, we can
476		 * abandon the peer, its broke.
477		 */
478		if (retval == -3) {
479			/* We abort with an error of missing mandatory param */
480			op_err =
481			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
482			if (op_err) {
483				/*
484				 * Expand beyond to include the mandatory
485				 * param cookie
486				 */
487				struct sctp_inv_mandatory_param *mp;
488
489				SCTP_BUF_LEN(op_err) =
490				    sizeof(struct sctp_inv_mandatory_param);
491				mp = mtod(op_err,
492				    struct sctp_inv_mandatory_param *);
493				/* Subtract the reserved param */
494				mp->length =
495				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
496				mp->num_param = htonl(1);
497				mp->param = htons(SCTP_STATE_COOKIE);
498				mp->resv = 0;
499			}
500			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
501			    sh, op_err, 0, net->port);
502			*abort_no_unlock = 1;
503		}
504		return (retval);
505	}
506	return (0);
507}
508
509static void
510sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
511    struct sctp_tcb *stcb, struct sctp_nets *net)
512{
513	struct sockaddr_storage store;
514	struct sockaddr_in *sin;
515	struct sockaddr_in6 *sin6;
516	struct sctp_nets *r_net;
517	struct timeval tv;
518	int req_prim = 0;
519
520	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
521		/* Invalid length */
522		return;
523	}
524	sin = (struct sockaddr_in *)&store;
525	sin6 = (struct sockaddr_in6 *)&store;
526
527	memset(&store, 0, sizeof(store));
528	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
529	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
530		sin->sin_family = cp->heartbeat.hb_info.addr_family;
531		sin->sin_len = cp->heartbeat.hb_info.addr_len;
532		sin->sin_port = stcb->rport;
533		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
534		    sizeof(sin->sin_addr));
535	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
536	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
537		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
538		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
539		sin6->sin6_port = stcb->rport;
540		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
541		    sizeof(sin6->sin6_addr));
542	} else {
543		return;
544	}
545	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
546	if (r_net == NULL) {
547		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
548		return;
549	}
550	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
551	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
552	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
553		/*
554		 * If the its a HB and it's random value is correct when can
555		 * confirm the destination.
556		 */
557		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
558		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
559			stcb->asoc.primary_destination = r_net;
560			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
561			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
562			r_net = TAILQ_FIRST(&stcb->asoc.nets);
563			if (r_net != stcb->asoc.primary_destination) {
564				/*
565				 * first one on the list is NOT the primary
566				 * sctp_cmpaddr() is much more efficent if
567				 * the primary is the first on the list,
568				 * make it so.
569				 */
570				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
571				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
572			}
573			req_prim = 1;
574		}
575		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
576		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
577	}
578	r_net->error_count = 0;
579	r_net->hb_responded = 1;
580	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
581	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
582	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
583		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
584		r_net->dest_state |= SCTP_ADDR_REACHABLE;
585		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
586		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
587		/* now was it the primary? if so restore */
588		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
589			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
590		}
591	}
592	/*
593	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
594	 * set the destination to active state and set the cwnd to one or
595	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
596	 * timer is running, for the destination, stop the timer because a
597	 * PF-heartbeat was received.
598	 */
599	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
600	    SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
601	    (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
602		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
603			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
604			    stcb, net,
605			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
606		}
607		net->dest_state &= ~SCTP_ADDR_PF;
608		net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
609		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
610		    net, net->cwnd);
611	}
612	/* Now lets do a RTO with this */
613	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
614	/* Mobility adaptation */
615	if (req_prim) {
616		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
617		    SCTP_MOBILITY_BASE) ||
618		    sctp_is_mobility_feature_on(stcb->sctp_ep,
619		    SCTP_MOBILITY_FASTHANDOFF)) &&
620		    sctp_is_mobility_feature_on(stcb->sctp_ep,
621		    SCTP_MOBILITY_PRIM_DELETED)) {
622
623			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
624			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
625			    SCTP_MOBILITY_FASTHANDOFF)) {
626				sctp_assoc_immediate_retrans(stcb,
627				    stcb->asoc.primary_destination);
628			}
629			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
630			    SCTP_MOBILITY_BASE)) {
631				sctp_move_chunks_from_deleted_prim(stcb,
632				    stcb->asoc.primary_destination);
633			}
634			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
635			    stcb->asoc.deleted_primary);
636		}
637	}
638}
639
640static void
641sctp_handle_abort(struct sctp_abort_chunk *cp,
642    struct sctp_tcb *stcb, struct sctp_nets *net)
643{
644#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
645	struct socket *so;
646
647#endif
648
649	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
650	if (stcb == NULL)
651		return;
652
653	/* stop any receive timers */
654	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
655	/* notify user of the abort and clean up... */
656	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
657	/* free the tcb */
658#if defined(SCTP_PANIC_ON_ABORT)
659	printf("stcb:%p state:%d rport:%d net:%p\n",
660	    stcb, stcb->asoc.state, stcb->rport, net);
661	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
662		panic("Received an ABORT");
663	} else {
664		printf("No panic its in state %x closed\n", stcb->asoc.state);
665	}
666#endif
667	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
668	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
669	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
670		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
671	}
672#ifdef SCTP_ASOCLOG_OF_TSNS
673	sctp_print_out_track_log(stcb);
674#endif
675#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
676	so = SCTP_INP_SO(stcb->sctp_ep);
677	atomic_add_int(&stcb->asoc.refcnt, 1);
678	SCTP_TCB_UNLOCK(stcb);
679	SCTP_SOCKET_LOCK(so, 1);
680	SCTP_TCB_LOCK(stcb);
681	atomic_subtract_int(&stcb->asoc.refcnt, 1);
682#endif
683	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
684	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
685	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
686#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
687	SCTP_SOCKET_UNLOCK(so, 1);
688#endif
689	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
690}
691
692static void
693sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
694    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
695{
696	struct sctp_association *asoc;
697	int some_on_streamwheel;
698
699#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
700	struct socket *so;
701
702#endif
703
704	SCTPDBG(SCTP_DEBUG_INPUT2,
705	    "sctp_handle_shutdown: handling SHUTDOWN\n");
706	if (stcb == NULL)
707		return;
708	asoc = &stcb->asoc;
709	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
710	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
711		return;
712	}
713	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
714		/* Shutdown NOT the expected size */
715		return;
716	} else {
717		sctp_update_acked(stcb, cp, net, abort_flag);
718	}
719	if (asoc->control_pdapi) {
720		/*
721		 * With a normal shutdown we assume the end of last record.
722		 */
723		SCTP_INP_READ_LOCK(stcb->sctp_ep);
724		asoc->control_pdapi->end_added = 1;
725		asoc->control_pdapi->pdapi_aborted = 1;
726		asoc->control_pdapi = NULL;
727		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
728#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
729		so = SCTP_INP_SO(stcb->sctp_ep);
730		atomic_add_int(&stcb->asoc.refcnt, 1);
731		SCTP_TCB_UNLOCK(stcb);
732		SCTP_SOCKET_LOCK(so, 1);
733		SCTP_TCB_LOCK(stcb);
734		atomic_subtract_int(&stcb->asoc.refcnt, 1);
735		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
736			/* assoc was freed while we were unlocked */
737			SCTP_SOCKET_UNLOCK(so, 1);
738			return;
739		}
740#endif
741		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
742#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
743		SCTP_SOCKET_UNLOCK(so, 1);
744#endif
745	}
746	/* goto SHUTDOWN_RECEIVED state to block new requests */
747	if (stcb->sctp_socket) {
748		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
749		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
750		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
751			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
752			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
753			/*
754			 * notify upper layer that peer has initiated a
755			 * shutdown
756			 */
757			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
758
759			/* reset time */
760			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
761		}
762	}
763	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
764		/*
765		 * stop the shutdown timer, since we WILL move to
766		 * SHUTDOWN-ACK-SENT.
767		 */
768		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
769	}
770	/* Now is there unsent data on a stream somewhere? */
771	some_on_streamwheel = sctp_is_there_unsent_data(stcb);
772
773	if (!TAILQ_EMPTY(&asoc->send_queue) ||
774	    !TAILQ_EMPTY(&asoc->sent_queue) ||
775	    some_on_streamwheel) {
776		/* By returning we will push more data out */
777		return;
778	} else {
779		/* no outstanding data to send, so move on... */
780		/* send SHUTDOWN-ACK */
781		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
782		/* move to SHUTDOWN-ACK-SENT state */
783		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
784		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
785			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
786		}
787		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
788		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
789		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
790		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
791		/* start SHUTDOWN timer */
792		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
793		    stcb, net);
794	}
795}
796
797static void
798sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
799    struct sctp_tcb *stcb, struct sctp_nets *net)
800{
801	struct sctp_association *asoc;
802
803#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
804	struct socket *so;
805
806	so = SCTP_INP_SO(stcb->sctp_ep);
807#endif
808	SCTPDBG(SCTP_DEBUG_INPUT2,
809	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
810	if (stcb == NULL)
811		return;
812
813	asoc = &stcb->asoc;
814	/* process according to association state */
815	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
816	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
817		/* unexpected SHUTDOWN-ACK... so ignore... */
818		SCTP_TCB_UNLOCK(stcb);
819		return;
820	}
821	if (asoc->control_pdapi) {
822		/*
823		 * With a normal shutdown we assume the end of last record.
824		 */
825		SCTP_INP_READ_LOCK(stcb->sctp_ep);
826		asoc->control_pdapi->end_added = 1;
827		asoc->control_pdapi->pdapi_aborted = 1;
828		asoc->control_pdapi = NULL;
829		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
830#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
831		atomic_add_int(&stcb->asoc.refcnt, 1);
832		SCTP_TCB_UNLOCK(stcb);
833		SCTP_SOCKET_LOCK(so, 1);
834		SCTP_TCB_LOCK(stcb);
835		atomic_subtract_int(&stcb->asoc.refcnt, 1);
836		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
837			/* assoc was freed while we were unlocked */
838			SCTP_SOCKET_UNLOCK(so, 1);
839			return;
840		}
841#endif
842		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
843#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
844		SCTP_SOCKET_UNLOCK(so, 1);
845#endif
846	}
847	/* are the queues empty? */
848	if (!TAILQ_EMPTY(&asoc->send_queue) ||
849	    !TAILQ_EMPTY(&asoc->sent_queue) ||
850	    !TAILQ_EMPTY(&asoc->out_wheel)) {
851		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
852	}
853	/* stop the timer */
854	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
855	/* send SHUTDOWN-COMPLETE */
856	sctp_send_shutdown_complete(stcb, net);
857	/* notify upper layer protocol */
858	if (stcb->sctp_socket) {
859		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
860		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
861		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
862			/* Set the connected flag to disconnected */
863			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
864		}
865	}
866	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
867	/* free the TCB but first save off the ep */
868#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
869	atomic_add_int(&stcb->asoc.refcnt, 1);
870	SCTP_TCB_UNLOCK(stcb);
871	SCTP_SOCKET_LOCK(so, 1);
872	SCTP_TCB_LOCK(stcb);
873	atomic_subtract_int(&stcb->asoc.refcnt, 1);
874#endif
875	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
876	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
877#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
878	SCTP_SOCKET_UNLOCK(so, 1);
879#endif
880}
881
882/*
883 * Skip past the param header and then we will find the chunk that caused the
884 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
885 * our peer must be broken.
886 */
887static void
888sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
889    struct sctp_nets *net)
890{
891	struct sctp_chunkhdr *chk;
892
893	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
894	switch (chk->chunk_type) {
895	case SCTP_ASCONF_ACK:
896	case SCTP_ASCONF:
897		sctp_asconf_cleanup(stcb, net);
898		break;
899	case SCTP_FORWARD_CUM_TSN:
900		stcb->asoc.peer_supports_prsctp = 0;
901		break;
902	default:
903		SCTPDBG(SCTP_DEBUG_INPUT2,
904		    "Peer does not support chunk type %d(%x)??\n",
905		    chk->chunk_type, (uint32_t) chk->chunk_type);
906		break;
907	}
908}
909
910/*
911 * Skip past the param header and then we will find the param that caused the
912 * problem.  There are a number of param's in a ASCONF OR the prsctp param
913 * these will turn of specific features.
914 */
915static void
916sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
917{
918	struct sctp_paramhdr *pbad;
919
920	pbad = phdr + 1;
921	switch (ntohs(pbad->param_type)) {
922		/* pr-sctp draft */
923	case SCTP_PRSCTP_SUPPORTED:
924		stcb->asoc.peer_supports_prsctp = 0;
925		break;
926	case SCTP_SUPPORTED_CHUNK_EXT:
927		break;
928		/* draft-ietf-tsvwg-addip-sctp */
929	case SCTP_ECN_NONCE_SUPPORTED:
930		stcb->asoc.peer_supports_ecn_nonce = 0;
931		stcb->asoc.ecn_nonce_allowed = 0;
932		stcb->asoc.ecn_allowed = 0;
933		break;
934	case SCTP_ADD_IP_ADDRESS:
935	case SCTP_DEL_IP_ADDRESS:
936	case SCTP_SET_PRIM_ADDR:
937		stcb->asoc.peer_supports_asconf = 0;
938		break;
939	case SCTP_SUCCESS_REPORT:
940	case SCTP_ERROR_CAUSE_IND:
941		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
942		SCTPDBG(SCTP_DEBUG_INPUT2,
943		    "Turning off ASCONF to this strange peer\n");
944		stcb->asoc.peer_supports_asconf = 0;
945		break;
946	default:
947		SCTPDBG(SCTP_DEBUG_INPUT2,
948		    "Peer does not support param type %d(%x)??\n",
949		    pbad->param_type, (uint32_t) pbad->param_type);
950		break;
951	}
952}
953
954static int
955sctp_handle_error(struct sctp_chunkhdr *ch,
956    struct sctp_tcb *stcb, struct sctp_nets *net)
957{
958	int chklen;
959	struct sctp_paramhdr *phdr;
960	uint16_t error_type;
961	uint16_t error_len;
962	struct sctp_association *asoc;
963	int adjust;
964
965#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
966	struct socket *so;
967
968#endif
969
970	/* parse through all of the errors and process */
971	asoc = &stcb->asoc;
972	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
973	    sizeof(struct sctp_chunkhdr));
974	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
975	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
976		/* Process an Error Cause */
977		error_type = ntohs(phdr->param_type);
978		error_len = ntohs(phdr->param_length);
979		if ((error_len > chklen) || (error_len == 0)) {
980			/* invalid param length for this param */
981			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
982			    chklen, error_len);
983			return (0);
984		}
985		switch (error_type) {
986		case SCTP_CAUSE_INVALID_STREAM:
987		case SCTP_CAUSE_MISSING_PARAM:
988		case SCTP_CAUSE_INVALID_PARAM:
989		case SCTP_CAUSE_NO_USER_DATA:
990			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
991			    error_type);
992			break;
993		case SCTP_CAUSE_STALE_COOKIE:
994			/*
995			 * We only act if we have echoed a cookie and are
996			 * waiting.
997			 */
998			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
999				int *p;
1000
1001				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1002				/* Save the time doubled */
1003				asoc->cookie_preserve_req = ntohl(*p) << 1;
1004				asoc->stale_cookie_count++;
1005				if (asoc->stale_cookie_count >
1006				    asoc->max_init_times) {
1007					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
1008					/* now free the asoc */
1009#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1010					so = SCTP_INP_SO(stcb->sctp_ep);
1011					atomic_add_int(&stcb->asoc.refcnt, 1);
1012					SCTP_TCB_UNLOCK(stcb);
1013					SCTP_SOCKET_LOCK(so, 1);
1014					SCTP_TCB_LOCK(stcb);
1015					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1016#endif
1017					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1018					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1019#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1020					SCTP_SOCKET_UNLOCK(so, 1);
1021#endif
1022					return (-1);
1023				}
1024				/* blast back to INIT state */
1025				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1026				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1027
1028				sctp_stop_all_cookie_timers(stcb);
1029				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1030			}
1031			break;
1032		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1033			/*
1034			 * Nothing we can do here, we don't do hostname
1035			 * addresses so if the peer does not like my IPv6
1036			 * (or IPv4 for that matter) it does not matter. If
1037			 * they don't support that type of address, they can
1038			 * NOT possibly get that packet type... i.e. with no
1039			 * IPv6 you can't recieve a IPv6 packet. so we can
1040			 * safely ignore this one. If we ever added support
1041			 * for HOSTNAME Addresses, then we would need to do
1042			 * something here.
1043			 */
1044			break;
1045		case SCTP_CAUSE_UNRECOG_CHUNK:
1046			sctp_process_unrecog_chunk(stcb, phdr, net);
1047			break;
1048		case SCTP_CAUSE_UNRECOG_PARAM:
1049			sctp_process_unrecog_param(stcb, phdr);
1050			break;
1051		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1052			/*
1053			 * We ignore this since the timer will drive out a
1054			 * new cookie anyway and there timer will drive us
1055			 * to send a SHUTDOWN_COMPLETE. We can't send one
1056			 * here since we don't have their tag.
1057			 */
1058			break;
1059		case SCTP_CAUSE_DELETING_LAST_ADDR:
1060		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1061		case SCTP_CAUSE_DELETING_SRC_ADDR:
1062			/*
1063			 * We should NOT get these here, but in a
1064			 * ASCONF-ACK.
1065			 */
1066			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1067			    error_type);
1068			break;
1069		case SCTP_CAUSE_OUT_OF_RESC:
1070			/*
1071			 * And what, pray tell do we do with the fact that
1072			 * the peer is out of resources? Not really sure we
1073			 * could do anything but abort. I suspect this
1074			 * should have came WITH an abort instead of in a
1075			 * OP-ERROR.
1076			 */
1077			break;
1078		default:
1079			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1080			    error_type);
1081			break;
1082		}
1083		adjust = SCTP_SIZE32(error_len);
1084		chklen -= adjust;
1085		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1086	}
1087	return (0);
1088}
1089
1090static int
1091sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1092    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1093    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1094{
1095	struct sctp_init_ack *init_ack;
1096	struct mbuf *op_err;
1097
1098	SCTPDBG(SCTP_DEBUG_INPUT2,
1099	    "sctp_handle_init_ack: handling INIT-ACK\n");
1100
1101	if (stcb == NULL) {
1102		SCTPDBG(SCTP_DEBUG_INPUT2,
1103		    "sctp_handle_init_ack: TCB is null\n");
1104		return (-1);
1105	}
1106	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1107		/* Invalid length */
1108		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1109		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1110		    op_err, 0, net->port);
1111		*abort_no_unlock = 1;
1112		return (-1);
1113	}
1114	init_ack = &cp->init;
1115	/* validate parameters */
1116	if (init_ack->initiate_tag == 0) {
1117		/* protocol error... send an abort */
1118		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1119		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1120		    op_err, 0, net->port);
1121		*abort_no_unlock = 1;
1122		return (-1);
1123	}
1124	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1125		/* protocol error... send an abort */
1126		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1127		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1128		    op_err, 0, net->port);
1129		*abort_no_unlock = 1;
1130		return (-1);
1131	}
1132	if (init_ack->num_inbound_streams == 0) {
1133		/* protocol error... send an abort */
1134		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1135		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1136		    op_err, 0, net->port);
1137		*abort_no_unlock = 1;
1138		return (-1);
1139	}
1140	if (init_ack->num_outbound_streams == 0) {
1141		/* protocol error... send an abort */
1142		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1143		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1144		    op_err, 0, net->port);
1145		*abort_no_unlock = 1;
1146		return (-1);
1147	}
1148	/* process according to association state... */
1149	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1150	case SCTP_STATE_COOKIE_WAIT:
1151		/* this is the expected state for this chunk */
1152		/* process the INIT-ACK parameters */
1153		if (stcb->asoc.primary_destination->dest_state &
1154		    SCTP_ADDR_UNCONFIRMED) {
1155			/*
1156			 * The primary is where we sent the INIT, we can
1157			 * always consider it confirmed when the INIT-ACK is
1158			 * returned. Do this before we load addresses
1159			 * though.
1160			 */
1161			stcb->asoc.primary_destination->dest_state &=
1162			    ~SCTP_ADDR_UNCONFIRMED;
1163			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1164			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1165		}
1166		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1167		    net, abort_no_unlock, vrf_id) < 0) {
1168			/* error in parsing parameters */
1169			return (-1);
1170		}
1171		/* update our state */
1172		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1173		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1174
1175		/* reset the RTO calc */
1176		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1177			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1178			    stcb->asoc.overall_error_count,
1179			    0,
1180			    SCTP_FROM_SCTP_INPUT,
1181			    __LINE__);
1182		}
1183		stcb->asoc.overall_error_count = 0;
1184		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1185		/*
1186		 * collapse the init timer back in case of a exponential
1187		 * backoff
1188		 */
1189		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1190		    stcb, net);
1191		/*
1192		 * the send at the end of the inbound data processing will
1193		 * cause the cookie to be sent
1194		 */
1195		break;
1196	case SCTP_STATE_SHUTDOWN_SENT:
1197		/* incorrect state... discard */
1198		break;
1199	case SCTP_STATE_COOKIE_ECHOED:
1200		/* incorrect state... discard */
1201		break;
1202	case SCTP_STATE_OPEN:
1203		/* incorrect state... discard */
1204		break;
1205	case SCTP_STATE_EMPTY:
1206	case SCTP_STATE_INUSE:
1207	default:
1208		/* incorrect state... discard */
1209		return (-1);
1210		break;
1211	}
1212	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1213	return (0);
1214}
1215
1216
1217/*
1218 * handle a state cookie for an existing association m: input packet mbuf
1219 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1220 * "split" mbuf and the cookie signature does not exist offset: offset into
1221 * mbuf to the cookie-echo chunk
1222 */
1223static struct sctp_tcb *
1224sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1225    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1226    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
1227    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1228    uint32_t vrf_id)
1229{
1230	struct sctp_association *asoc;
1231	struct sctp_init_chunk *init_cp, init_buf;
1232	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1233	int chk_length;
1234	int init_offset, initack_offset, i;
1235	int retval;
1236	int spec_flag = 0;
1237	uint32_t how_indx;
1238
1239	/* I know that the TCB is non-NULL from the caller */
1240	asoc = &stcb->asoc;
1241	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1242		if (asoc->cookie_how[how_indx] == 0)
1243			break;
1244	}
1245	if (how_indx < sizeof(asoc->cookie_how)) {
1246		asoc->cookie_how[how_indx] = 1;
1247	}
1248	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1249		/* SHUTDOWN came in after sending INIT-ACK */
1250		struct mbuf *op_err;
1251		struct sctp_paramhdr *ph;
1252
1253		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1254		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1255		    0, M_DONTWAIT, 1, MT_DATA);
1256		if (op_err == NULL) {
1257			/* FOOBAR */
1258			return (NULL);
1259		}
1260		/* pre-reserve some space */
1261#ifdef INET6
1262		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1263#else
1264		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1265#endif
1266		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1267		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1268		/* Set the len */
1269		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1270		ph = mtod(op_err, struct sctp_paramhdr *);
1271		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1272		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1273		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1274		    vrf_id, net->port);
1275		if (how_indx < sizeof(asoc->cookie_how))
1276			asoc->cookie_how[how_indx] = 2;
1277		return (NULL);
1278	}
1279	/*
1280	 * find and validate the INIT chunk in the cookie (peer's info) the
1281	 * INIT should start after the cookie-echo header struct (chunk
1282	 * header, state cookie header struct)
1283	 */
1284	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1285
1286	init_cp = (struct sctp_init_chunk *)
1287	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1288	    (uint8_t *) & init_buf);
1289	if (init_cp == NULL) {
1290		/* could not pull a INIT chunk in cookie */
1291		return (NULL);
1292	}
1293	chk_length = ntohs(init_cp->ch.chunk_length);
1294	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1295		return (NULL);
1296	}
1297	/*
1298	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1299	 * INIT-ACK follows the INIT chunk
1300	 */
1301	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1302	initack_cp = (struct sctp_init_ack_chunk *)
1303	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1304	    (uint8_t *) & initack_buf);
1305	if (initack_cp == NULL) {
1306		/* could not pull INIT-ACK chunk in cookie */
1307		return (NULL);
1308	}
1309	chk_length = ntohs(initack_cp->ch.chunk_length);
1310	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1311		return (NULL);
1312	}
1313	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1314	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1315		/*
1316		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1317		 * to get into the OPEN state
1318		 */
1319		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1320			/*-
1321			 * Opps, this means that we somehow generated two vtag's
1322			 * the same. I.e. we did:
1323			 *  Us               Peer
1324			 *   <---INIT(tag=a)------
1325			 *   ----INIT-ACK(tag=t)-->
1326			 *   ----INIT(tag=t)------> *1
1327			 *   <---INIT-ACK(tag=a)---
1328                         *   <----CE(tag=t)------------- *2
1329			 *
1330			 * At point *1 we should be generating a different
1331			 * tag t'. Which means we would throw away the CE and send
1332			 * ours instead. Basically this is case C (throw away side).
1333			 */
1334			if (how_indx < sizeof(asoc->cookie_how))
1335				asoc->cookie_how[how_indx] = 17;
1336			return (NULL);
1337
1338		}
1339		switch SCTP_GET_STATE
1340			(asoc) {
1341		case SCTP_STATE_COOKIE_WAIT:
1342		case SCTP_STATE_COOKIE_ECHOED:
1343			/*
1344			 * INIT was sent but got a COOKIE_ECHO with the
1345			 * correct tags... just accept it...but we must
1346			 * process the init so that we can make sure we have
1347			 * the right seq no's.
1348			 */
1349			/* First we must process the INIT !! */
1350			retval = sctp_process_init(init_cp, stcb, net);
1351			if (retval < 0) {
1352				if (how_indx < sizeof(asoc->cookie_how))
1353					asoc->cookie_how[how_indx] = 3;
1354				return (NULL);
1355			}
1356			/* we have already processed the INIT so no problem */
1357			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1358			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1359			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1360			/* update current state */
1361			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1362				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1363			else
1364				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1365
1366			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1367			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1368				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1369				    stcb->sctp_ep, stcb, asoc->primary_destination);
1370			}
1371			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1372			sctp_stop_all_cookie_timers(stcb);
1373			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1374			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1375			    (inp->sctp_socket->so_qlimit == 0)
1376			    ) {
1377#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1378				struct socket *so;
1379
1380#endif
1381				/*
1382				 * Here is where collision would go if we
1383				 * did a connect() and instead got a
1384				 * init/init-ack/cookie done before the
1385				 * init-ack came back..
1386				 */
1387				stcb->sctp_ep->sctp_flags |=
1388				    SCTP_PCB_FLAGS_CONNECTED;
1389#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1390				so = SCTP_INP_SO(stcb->sctp_ep);
1391				atomic_add_int(&stcb->asoc.refcnt, 1);
1392				SCTP_TCB_UNLOCK(stcb);
1393				SCTP_SOCKET_LOCK(so, 1);
1394				SCTP_TCB_LOCK(stcb);
1395				atomic_add_int(&stcb->asoc.refcnt, -1);
1396				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1397					SCTP_SOCKET_UNLOCK(so, 1);
1398					return (NULL);
1399				}
1400#endif
1401				soisconnected(stcb->sctp_socket);
1402#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1403				SCTP_SOCKET_UNLOCK(so, 1);
1404#endif
1405			}
1406			/* notify upper layer */
1407			*notification = SCTP_NOTIFY_ASSOC_UP;
1408			/*
1409			 * since we did not send a HB make sure we don't
1410			 * double things
1411			 */
1412			net->hb_responded = 1;
1413			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1414			    &cookie->time_entered, sctp_align_unsafe_makecopy);
1415
1416			if (stcb->asoc.sctp_autoclose_ticks &&
1417			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1418				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1419				    inp, stcb, NULL);
1420			}
1421			break;
1422		default:
1423			/*
1424			 * we're in the OPEN state (or beyond), so peer must
1425			 * have simply lost the COOKIE-ACK
1426			 */
1427			break;
1428			}	/* end switch */
1429		sctp_stop_all_cookie_timers(stcb);
1430		/*
1431		 * We ignore the return code here.. not sure if we should
1432		 * somehow abort.. but we do have an existing asoc. This
1433		 * really should not fail.
1434		 */
1435		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1436		    init_offset + sizeof(struct sctp_init_chunk),
1437		    initack_offset, sh, init_src)) {
1438			if (how_indx < sizeof(asoc->cookie_how))
1439				asoc->cookie_how[how_indx] = 4;
1440			return (NULL);
1441		}
1442		/* respond with a COOKIE-ACK */
1443		sctp_toss_old_cookies(stcb, asoc);
1444		sctp_send_cookie_ack(stcb);
1445		if (how_indx < sizeof(asoc->cookie_how))
1446			asoc->cookie_how[how_indx] = 5;
1447		return (stcb);
1448	}
1449	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1450	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1451	    cookie->tie_tag_my_vtag == 0 &&
1452	    cookie->tie_tag_peer_vtag == 0) {
1453		/*
1454		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1455		 */
1456		if (how_indx < sizeof(asoc->cookie_how))
1457			asoc->cookie_how[how_indx] = 6;
1458		return (NULL);
1459	}
1460	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
1461	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
1462	    init_cp->init.initiate_tag == 0)) {
1463		/*
1464		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1465		 * should be ok, re-accept peer info
1466		 */
1467		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1468			/*
1469			 * Extension of case C. If we hit this, then the
1470			 * random number generator returned the same vtag
1471			 * when we first sent our INIT-ACK and when we later
1472			 * sent our INIT. The side with the seq numbers that
1473			 * are different will be the one that normnally
1474			 * would have hit case C. This in effect "extends"
1475			 * our vtags in this collision case to be 64 bits.
1476			 * The same collision could occur aka you get both
1477			 * vtag and seq number the same twice in a row.. but
1478			 * is much less likely. If it did happen then we
1479			 * would proceed through and bring up the assoc.. we
1480			 * may end up with the wrong stream setup however..
1481			 * which would be bad.. but there is no way to
1482			 * tell.. until we send on a stream that does not
1483			 * exist :-)
1484			 */
1485			if (how_indx < sizeof(asoc->cookie_how))
1486				asoc->cookie_how[how_indx] = 7;
1487
1488			return (NULL);
1489		}
1490		if (how_indx < sizeof(asoc->cookie_how))
1491			asoc->cookie_how[how_indx] = 8;
1492		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1493		sctp_stop_all_cookie_timers(stcb);
1494		/*
1495		 * since we did not send a HB make sure we don't double
1496		 * things
1497		 */
1498		net->hb_responded = 1;
1499		if (stcb->asoc.sctp_autoclose_ticks &&
1500		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1501			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1502			    NULL);
1503		}
1504		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1505		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1506
1507		/* Note last_cwr_tsn? where is this used? */
1508		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1509		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1510			/*
1511			 * Ok the peer probably discarded our data (if we
1512			 * echoed a cookie+data). So anything on the
1513			 * sent_queue should be marked for retransmit, we
1514			 * may not get something to kick us so it COULD
1515			 * still take a timeout to move these.. but it can't
1516			 * hurt to mark them.
1517			 */
1518			struct sctp_tmit_chunk *chk;
1519
1520			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1521				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1522					chk->sent = SCTP_DATAGRAM_RESEND;
1523					sctp_flight_size_decrease(chk);
1524					sctp_total_flight_decrease(stcb, chk);
1525					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1526					spec_flag++;
1527				}
1528			}
1529
1530		}
1531		/* process the INIT info (peer's info) */
1532		retval = sctp_process_init(init_cp, stcb, net);
1533		if (retval < 0) {
1534			if (how_indx < sizeof(asoc->cookie_how))
1535				asoc->cookie_how[how_indx] = 9;
1536			return (NULL);
1537		}
1538		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1539		    init_offset + sizeof(struct sctp_init_chunk),
1540		    initack_offset, sh, init_src)) {
1541			if (how_indx < sizeof(asoc->cookie_how))
1542				asoc->cookie_how[how_indx] = 10;
1543			return (NULL);
1544		}
1545		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1546		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1547			*notification = SCTP_NOTIFY_ASSOC_UP;
1548
1549			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1550			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1551			    (inp->sctp_socket->so_qlimit == 0)) {
1552#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1553				struct socket *so;
1554
1555#endif
1556				stcb->sctp_ep->sctp_flags |=
1557				    SCTP_PCB_FLAGS_CONNECTED;
1558#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1559				so = SCTP_INP_SO(stcb->sctp_ep);
1560				atomic_add_int(&stcb->asoc.refcnt, 1);
1561				SCTP_TCB_UNLOCK(stcb);
1562				SCTP_SOCKET_LOCK(so, 1);
1563				SCTP_TCB_LOCK(stcb);
1564				atomic_add_int(&stcb->asoc.refcnt, -1);
1565				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1566					SCTP_SOCKET_UNLOCK(so, 1);
1567					return (NULL);
1568				}
1569#endif
1570				soisconnected(stcb->sctp_socket);
1571#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1572				SCTP_SOCKET_UNLOCK(so, 1);
1573#endif
1574			}
1575			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1576				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1577			else
1578				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1579			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1580		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1581			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1582		} else {
1583			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1584		}
1585		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1586		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1587			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1588			    stcb->sctp_ep, stcb, asoc->primary_destination);
1589		}
1590		sctp_stop_all_cookie_timers(stcb);
1591		sctp_toss_old_cookies(stcb, asoc);
1592		sctp_send_cookie_ack(stcb);
1593		if (spec_flag) {
1594			/*
1595			 * only if we have retrans set do we do this. What
1596			 * this call does is get only the COOKIE-ACK out and
1597			 * then when we return the normal call to
1598			 * sctp_chunk_output will get the retrans out behind
1599			 * this.
1600			 */
1601			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1602		}
1603		if (how_indx < sizeof(asoc->cookie_how))
1604			asoc->cookie_how[how_indx] = 11;
1605
1606		return (stcb);
1607	}
1608	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1609	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1610	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1611	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1612	    cookie->tie_tag_peer_vtag != 0) {
1613		struct sctpasochead *head;
1614
1615		/*
1616		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1617		 */
1618		/* temp code */
1619		if (how_indx < sizeof(asoc->cookie_how))
1620			asoc->cookie_how[how_indx] = 12;
1621		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1622		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1623
1624		*sac_assoc_id = sctp_get_associd(stcb);
1625		/* notify upper layer */
1626		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1627		atomic_add_int(&stcb->asoc.refcnt, 1);
1628		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1629		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1630		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1631			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1632		}
1633		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1634			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1635		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1636			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1637		}
1638		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1639			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1640			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1641			    stcb->sctp_ep, stcb, asoc->primary_destination);
1642
1643		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1644			/* move to OPEN state, if not in SHUTDOWN_SENT */
1645			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1646		}
1647		asoc->pre_open_streams =
1648		    ntohs(initack_cp->init.num_outbound_streams);
1649		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1650		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1651		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1652
1653		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1654		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1655
1656		asoc->str_reset_seq_in = asoc->init_seq_number;
1657
1658		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1659		if (asoc->mapping_array) {
1660			memset(asoc->mapping_array, 0,
1661			    asoc->mapping_array_size);
1662		}
1663		SCTP_TCB_UNLOCK(stcb);
1664		SCTP_INP_INFO_WLOCK();
1665		SCTP_INP_WLOCK(stcb->sctp_ep);
1666		SCTP_TCB_LOCK(stcb);
1667		atomic_add_int(&stcb->asoc.refcnt, -1);
1668		/* send up all the data */
1669		SCTP_TCB_SEND_LOCK(stcb);
1670
1671		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1672		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1673			stcb->asoc.strmout[i].stream_no = i;
1674			stcb->asoc.strmout[i].next_sequence_sent = 0;
1675			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1676		}
1677		/* process the INIT-ACK info (my info) */
1678		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1679		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1680
1681		/* pull from vtag hash */
1682		LIST_REMOVE(stcb, sctp_asocs);
1683		/* re-insert to new vtag position */
1684		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1685		    SCTP_BASE_INFO(hashasocmark))];
1686		/*
1687		 * put it in the bucket in the vtag hash of assoc's for the
1688		 * system
1689		 */
1690		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1691
1692		/* Is this the first restart? */
1693		if (stcb->asoc.in_restart_hash == 0) {
1694			/* Ok add it to assoc_id vtag hash */
1695			head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
1696			    SCTP_BASE_INFO(hashrestartmark))];
1697			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
1698			stcb->asoc.in_restart_hash = 1;
1699		}
1700		/* process the INIT info (peer's info) */
1701		SCTP_TCB_SEND_UNLOCK(stcb);
1702		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1703		SCTP_INP_INFO_WUNLOCK();
1704
1705		retval = sctp_process_init(init_cp, stcb, net);
1706		if (retval < 0) {
1707			if (how_indx < sizeof(asoc->cookie_how))
1708				asoc->cookie_how[how_indx] = 13;
1709
1710			return (NULL);
1711		}
1712		/*
1713		 * since we did not send a HB make sure we don't double
1714		 * things
1715		 */
1716		net->hb_responded = 1;
1717
1718		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1719		    init_offset + sizeof(struct sctp_init_chunk),
1720		    initack_offset, sh, init_src)) {
1721			if (how_indx < sizeof(asoc->cookie_how))
1722				asoc->cookie_how[how_indx] = 14;
1723
1724			return (NULL);
1725		}
1726		/* respond with a COOKIE-ACK */
1727		sctp_stop_all_cookie_timers(stcb);
1728		sctp_toss_old_cookies(stcb, asoc);
1729		sctp_send_cookie_ack(stcb);
1730		if (how_indx < sizeof(asoc->cookie_how))
1731			asoc->cookie_how[how_indx] = 15;
1732
1733		return (stcb);
1734	}
1735	if (how_indx < sizeof(asoc->cookie_how))
1736		asoc->cookie_how[how_indx] = 16;
1737	/* all other cases... */
1738	return (NULL);
1739}
1740
1741
1742/*
1743 * handle a state cookie for a new association m: input packet mbuf chain--
1744 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1745 * and the cookie signature does not exist offset: offset into mbuf to the
1746 * cookie-echo chunk length: length of the cookie chunk to: where the init
1747 * was from returns a new TCB
1748 */
1749static struct sctp_tcb *
1750sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1751    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1752    struct sctp_inpcb *inp, struct sctp_nets **netp,
1753    struct sockaddr *init_src, int *notification,
1754    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1755    uint32_t vrf_id, uint16_t port)
1756{
1757	struct sctp_tcb *stcb;
1758	struct sctp_init_chunk *init_cp, init_buf;
1759	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1760	struct sockaddr_storage sa_store;
1761	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1762	struct sockaddr_in *sin;
1763	struct sockaddr_in6 *sin6;
1764	struct sctp_association *asoc;
1765	int chk_length;
1766	int init_offset, initack_offset, initack_limit;
1767	int retval;
1768	int error = 0;
1769	uint32_t old_tag;
1770	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1771
1772#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1773	struct socket *so;
1774
1775	so = SCTP_INP_SO(inp);
1776#endif
1777
1778	/*
1779	 * find and validate the INIT chunk in the cookie (peer's info) the
1780	 * INIT should start after the cookie-echo header struct (chunk
1781	 * header, state cookie header struct)
1782	 */
1783	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1784	init_cp = (struct sctp_init_chunk *)
1785	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1786	    (uint8_t *) & init_buf);
1787	if (init_cp == NULL) {
1788		/* could not pull a INIT chunk in cookie */
1789		SCTPDBG(SCTP_DEBUG_INPUT1,
1790		    "process_cookie_new: could not pull INIT chunk hdr\n");
1791		return (NULL);
1792	}
1793	chk_length = ntohs(init_cp->ch.chunk_length);
1794	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1795		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1796		return (NULL);
1797	}
1798	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1799	/*
1800	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1801	 * INIT-ACK follows the INIT chunk
1802	 */
1803	initack_cp = (struct sctp_init_ack_chunk *)
1804	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1805	    (uint8_t *) & initack_buf);
1806	if (initack_cp == NULL) {
1807		/* could not pull INIT-ACK chunk in cookie */
1808		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1809		return (NULL);
1810	}
1811	chk_length = ntohs(initack_cp->ch.chunk_length);
1812	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1813		return (NULL);
1814	}
1815	/*
1816	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1817	 * "initack_limit" value.  This is because the chk_length field
1818	 * includes the length of the cookie, but the cookie is omitted when
1819	 * the INIT and INIT_ACK are tacked onto the cookie...
1820	 */
1821	initack_limit = offset + cookie_len;
1822
1823	/*
1824	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
1825	 * and popluate
1826	 */
1827
1828	/*
1829	 * Here we do a trick, we set in NULL for the proc/thread argument.
1830	 * We do this since in effect we only use the p argument when the
1831	 * socket is unbound and we must do an implicit bind. Since we are
1832	 * getting a cookie, we cannot be unbound.
1833	 */
1834	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1835	    ntohl(initack_cp->init.initiate_tag), vrf_id,
1836	    (struct thread *)NULL
1837	    );
1838	if (stcb == NULL) {
1839		struct mbuf *op_err;
1840
1841		/* memory problem? */
1842		SCTPDBG(SCTP_DEBUG_INPUT1,
1843		    "process_cookie_new: no room for another TCB!\n");
1844		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1845
1846		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1847		    sh, op_err, vrf_id, port);
1848		return (NULL);
1849	}
1850	/* get the correct sctp_nets */
1851	if (netp)
1852		*netp = sctp_findnet(stcb, init_src);
1853
1854	asoc = &stcb->asoc;
1855	/* get scope variables out of cookie */
1856	asoc->ipv4_local_scope = cookie->ipv4_scope;
1857	asoc->site_scope = cookie->site_scope;
1858	asoc->local_scope = cookie->local_scope;
1859	asoc->loopback_scope = cookie->loopback_scope;
1860
1861	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1862	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1863		struct mbuf *op_err;
1864
1865		/*
1866		 * Houston we have a problem. The EP changed while the
1867		 * cookie was in flight. Only recourse is to abort the
1868		 * association.
1869		 */
1870		atomic_add_int(&stcb->asoc.refcnt, 1);
1871		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1872		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1873		    sh, op_err, vrf_id, port);
1874#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1875		SCTP_TCB_UNLOCK(stcb);
1876		SCTP_SOCKET_LOCK(so, 1);
1877		SCTP_TCB_LOCK(stcb);
1878#endif
1879		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1880		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1881#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1882		SCTP_SOCKET_UNLOCK(so, 1);
1883#endif
1884		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1885		return (NULL);
1886	}
1887	/* process the INIT-ACK info (my info) */
1888	old_tag = asoc->my_vtag;
1889	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1890	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1891	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1892	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1893	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1894	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1895	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1896	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1897	asoc->str_reset_seq_in = asoc->init_seq_number;
1898
1899	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1900
1901	/* process the INIT info (peer's info) */
1902	if (netp)
1903		retval = sctp_process_init(init_cp, stcb, *netp);
1904	else
1905		retval = 0;
1906	if (retval < 0) {
1907		atomic_add_int(&stcb->asoc.refcnt, 1);
1908#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1909		SCTP_TCB_UNLOCK(stcb);
1910		SCTP_SOCKET_LOCK(so, 1);
1911		SCTP_TCB_LOCK(stcb);
1912#endif
1913		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1914#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1915		SCTP_SOCKET_UNLOCK(so, 1);
1916#endif
1917		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1918		return (NULL);
1919	}
1920	/* load all addresses */
1921	if (sctp_load_addresses_from_init(stcb, m, iphlen,
1922	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
1923	    init_src)) {
1924		atomic_add_int(&stcb->asoc.refcnt, 1);
1925#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1926		SCTP_TCB_UNLOCK(stcb);
1927		SCTP_SOCKET_LOCK(so, 1);
1928		SCTP_TCB_LOCK(stcb);
1929#endif
1930		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1931#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1932		SCTP_SOCKET_UNLOCK(so, 1);
1933#endif
1934		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1935		return (NULL);
1936	}
1937	/*
1938	 * verify any preceding AUTH chunk that was skipped
1939	 */
1940	/* pull the local authentication parameters from the cookie/init-ack */
1941	sctp_auth_get_cookie_params(stcb, m,
1942	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1943	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
1944	if (auth_skipped) {
1945		struct sctp_auth_chunk *auth;
1946
1947		auth = (struct sctp_auth_chunk *)
1948		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1949		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1950			/* auth HMAC failed, dump the assoc and packet */
1951			SCTPDBG(SCTP_DEBUG_AUTH1,
1952			    "COOKIE-ECHO: AUTH failed\n");
1953			atomic_add_int(&stcb->asoc.refcnt, 1);
1954#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1955			SCTP_TCB_UNLOCK(stcb);
1956			SCTP_SOCKET_LOCK(so, 1);
1957			SCTP_TCB_LOCK(stcb);
1958#endif
1959			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1960#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1961			SCTP_SOCKET_UNLOCK(so, 1);
1962#endif
1963			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1964			return (NULL);
1965		} else {
1966			/* remaining chunks checked... good to go */
1967			stcb->asoc.authenticated = 1;
1968		}
1969	}
1970	/* update current state */
1971	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1972	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1973	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1974		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1975		    stcb->sctp_ep, stcb, asoc->primary_destination);
1976	}
1977	sctp_stop_all_cookie_timers(stcb);
1978	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1979	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1980
1981	/*
1982	 * if we're doing ASCONFs, check to see if we have any new local
1983	 * addresses that need to get added to the peer (eg. addresses
1984	 * changed while cookie echo in flight).  This needs to be done
1985	 * after we go to the OPEN state to do the correct asconf
1986	 * processing. else, make sure we have the correct addresses in our
1987	 * lists
1988	 */
1989
1990	/* warning, we re-use sin, sin6, sa_store here! */
1991	/* pull in local_address (our "from" address) */
1992	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1993		/* source addr is IPv4 */
1994		sin = (struct sockaddr_in *)initack_src;
1995		memset(sin, 0, sizeof(*sin));
1996		sin->sin_family = AF_INET;
1997		sin->sin_len = sizeof(struct sockaddr_in);
1998		sin->sin_addr.s_addr = cookie->laddress[0];
1999	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
2000		/* source addr is IPv6 */
2001		sin6 = (struct sockaddr_in6 *)initack_src;
2002		memset(sin6, 0, sizeof(*sin6));
2003		sin6->sin6_family = AF_INET6;
2004		sin6->sin6_len = sizeof(struct sockaddr_in6);
2005		sin6->sin6_scope_id = cookie->scope_id;
2006		memcpy(&sin6->sin6_addr, cookie->laddress,
2007		    sizeof(sin6->sin6_addr));
2008	} else {
2009		atomic_add_int(&stcb->asoc.refcnt, 1);
2010#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2011		SCTP_TCB_UNLOCK(stcb);
2012		SCTP_SOCKET_LOCK(so, 1);
2013		SCTP_TCB_LOCK(stcb);
2014#endif
2015		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2016#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2017		SCTP_SOCKET_UNLOCK(so, 1);
2018#endif
2019		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2020		return (NULL);
2021	}
2022
2023	/* set up to notify upper layer */
2024	*notification = SCTP_NOTIFY_ASSOC_UP;
2025	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2026	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2027	    (inp->sctp_socket->so_qlimit == 0)) {
2028		/*
2029		 * This is an endpoint that called connect() how it got a
2030		 * cookie that is NEW is a bit of a mystery. It must be that
2031		 * the INIT was sent, but before it got there.. a complete
2032		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
2033		 * should have went to the other code.. not here.. oh well..
2034		 * a bit of protection is worth having..
2035		 */
2036		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2037#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2038		atomic_add_int(&stcb->asoc.refcnt, 1);
2039		SCTP_TCB_UNLOCK(stcb);
2040		SCTP_SOCKET_LOCK(so, 1);
2041		SCTP_TCB_LOCK(stcb);
2042		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2043		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2044			SCTP_SOCKET_UNLOCK(so, 1);
2045			return (NULL);
2046		}
2047#endif
2048		soisconnected(stcb->sctp_socket);
2049#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2050		SCTP_SOCKET_UNLOCK(so, 1);
2051#endif
2052	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2053	    (inp->sctp_socket->so_qlimit)) {
2054		/*
2055		 * We don't want to do anything with this one. Since it is
2056		 * the listening guy. The timer will get started for
2057		 * accepted connections in the caller.
2058		 */
2059		;
2060	}
2061	/* since we did not send a HB make sure we don't double things */
2062	if ((netp) && (*netp))
2063		(*netp)->hb_responded = 1;
2064
2065	if (stcb->asoc.sctp_autoclose_ticks &&
2066	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2067		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2068	}
2069	/* calculate the RTT */
2070	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2071	if ((netp) && (*netp)) {
2072		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
2073		    &cookie->time_entered, sctp_align_unsafe_makecopy);
2074	}
2075	/* respond with a COOKIE-ACK */
2076	sctp_send_cookie_ack(stcb);
2077
2078	/*
2079	 * check the address lists for any ASCONFs that need to be sent
2080	 * AFTER the cookie-ack is sent
2081	 */
2082	sctp_check_address_list(stcb, m,
2083	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2084	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2085	    initack_src, cookie->local_scope, cookie->site_scope,
2086	    cookie->ipv4_scope, cookie->loopback_scope);
2087
2088
2089	return (stcb);
2090}
2091
2092
2093/*
2094 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2095 * existing (non-NULL) TCB
2096 */
2097static struct mbuf *
2098sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2099    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2100    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2101    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2102    struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
2103{
2104	struct sctp_state_cookie *cookie;
2105	struct sockaddr_in6 sin6;
2106	struct sockaddr_in sin;
2107	struct sctp_tcb *l_stcb = *stcb;
2108	struct sctp_inpcb *l_inp;
2109	struct sockaddr *to;
2110	sctp_assoc_t sac_restart_id;
2111	struct sctp_pcb *ep;
2112	struct mbuf *m_sig;
2113	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2114	uint8_t *sig;
2115	uint8_t cookie_ok = 0;
2116	unsigned int size_of_pkt, sig_offset, cookie_offset;
2117	unsigned int cookie_len;
2118	struct timeval now;
2119	struct timeval time_expires;
2120	struct sockaddr_storage dest_store;
2121	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2122	struct ip *iph;
2123	int notification = 0;
2124	struct sctp_nets *netl;
2125	int had_a_existing_tcb = 0;
2126
2127	SCTPDBG(SCTP_DEBUG_INPUT2,
2128	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2129
2130	if (inp_p == NULL) {
2131		return (NULL);
2132	}
2133	/* First get the destination address setup too. */
2134	iph = mtod(m, struct ip *);
2135	switch (iph->ip_v) {
2136	case IPVERSION:
2137		{
2138			/* its IPv4 */
2139			struct sockaddr_in *lsin;
2140
2141			lsin = (struct sockaddr_in *)(localep_sa);
2142			memset(lsin, 0, sizeof(*lsin));
2143			lsin->sin_family = AF_INET;
2144			lsin->sin_len = sizeof(*lsin);
2145			lsin->sin_port = sh->dest_port;
2146			lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2147			size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2148			break;
2149		}
2150#ifdef INET6
2151	case IPV6_VERSION >> 4:
2152		{
2153			/* its IPv6 */
2154			struct ip6_hdr *ip6;
2155			struct sockaddr_in6 *lsin6;
2156
2157			lsin6 = (struct sockaddr_in6 *)(localep_sa);
2158			memset(lsin6, 0, sizeof(*lsin6));
2159			lsin6->sin6_family = AF_INET6;
2160			lsin6->sin6_len = sizeof(struct sockaddr_in6);
2161			ip6 = mtod(m, struct ip6_hdr *);
2162			lsin6->sin6_port = sh->dest_port;
2163			lsin6->sin6_addr = ip6->ip6_dst;
2164			size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2165			break;
2166		}
2167#endif
2168	default:
2169		return (NULL);
2170	}
2171
2172	cookie = &cp->cookie;
2173	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2174	cookie_len = ntohs(cp->ch.chunk_length);
2175
2176	if ((cookie->peerport != sh->src_port) &&
2177	    (cookie->myport != sh->dest_port) &&
2178	    (cookie->my_vtag != sh->v_tag)) {
2179		/*
2180		 * invalid ports or bad tag.  Note that we always leave the
2181		 * v_tag in the header in network order and when we stored
2182		 * it in the my_vtag slot we also left it in network order.
2183		 * This maintains the match even though it may be in the
2184		 * opposite byte order of the machine :->
2185		 */
2186		return (NULL);
2187	}
2188	if (cookie_len > size_of_pkt ||
2189	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2190	    sizeof(struct sctp_init_chunk) +
2191	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2192		/* cookie too long!  or too small */
2193		return (NULL);
2194	}
2195	/*
2196	 * split off the signature into its own mbuf (since it should not be
2197	 * calculated in the sctp_hmac_m() call).
2198	 */
2199	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2200	if (sig_offset > size_of_pkt) {
2201		/* packet not correct size! */
2202		/* XXX this may already be accounted for earlier... */
2203		return (NULL);
2204	}
2205	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2206	if (m_sig == NULL) {
2207		/* out of memory or ?? */
2208		return (NULL);
2209	}
2210#ifdef SCTP_MBUF_LOGGING
2211	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2212		struct mbuf *mat;
2213
2214		mat = m_sig;
2215		while (mat) {
2216			if (SCTP_BUF_IS_EXTENDED(mat)) {
2217				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2218			}
2219			mat = SCTP_BUF_NEXT(mat);
2220		}
2221	}
2222#endif
2223
2224	/*
2225	 * compute the signature/digest for the cookie
2226	 */
2227	ep = &(*inp_p)->sctp_ep;
2228	l_inp = *inp_p;
2229	if (l_stcb) {
2230		SCTP_TCB_UNLOCK(l_stcb);
2231	}
2232	SCTP_INP_RLOCK(l_inp);
2233	if (l_stcb) {
2234		SCTP_TCB_LOCK(l_stcb);
2235	}
2236	/* which cookie is it? */
2237	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2238	    (ep->current_secret_number != ep->last_secret_number)) {
2239		/* it's the old cookie */
2240		(void)sctp_hmac_m(SCTP_HMAC,
2241		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2242		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2243	} else {
2244		/* it's the current cookie */
2245		(void)sctp_hmac_m(SCTP_HMAC,
2246		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2247		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2248	}
2249	/* get the signature */
2250	SCTP_INP_RUNLOCK(l_inp);
2251	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2252	if (sig == NULL) {
2253		/* couldn't find signature */
2254		sctp_m_freem(m_sig);
2255		return (NULL);
2256	}
2257	/* compare the received digest with the computed digest */
2258	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2259		/* try the old cookie? */
2260		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2261		    (ep->current_secret_number != ep->last_secret_number)) {
2262			/* compute digest with old */
2263			(void)sctp_hmac_m(SCTP_HMAC,
2264			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2265			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2266			/* compare */
2267			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2268				cookie_ok = 1;
2269		}
2270	} else {
2271		cookie_ok = 1;
2272	}
2273
2274	/*
2275	 * Now before we continue we must reconstruct our mbuf so that
2276	 * normal processing of any other chunks will work.
2277	 */
2278	{
2279		struct mbuf *m_at;
2280
2281		m_at = m;
2282		while (SCTP_BUF_NEXT(m_at) != NULL) {
2283			m_at = SCTP_BUF_NEXT(m_at);
2284		}
2285		SCTP_BUF_NEXT(m_at) = m_sig;
2286	}
2287
2288	if (cookie_ok == 0) {
2289		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2290		SCTPDBG(SCTP_DEBUG_INPUT2,
2291		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2292		    (uint32_t) offset, cookie_offset, sig_offset);
2293		return (NULL);
2294	}
2295	/*
2296	 * check the cookie timestamps to be sure it's not stale
2297	 */
2298	(void)SCTP_GETTIME_TIMEVAL(&now);
2299	/* Expire time is in Ticks, so we convert to seconds */
2300	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2301	time_expires.tv_usec = cookie->time_entered.tv_usec;
2302	/*
2303	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2304	 * is undefined.
2305	 */
2306	if (timevalcmp(&now, &time_expires, >)) {
2307		/* cookie is stale! */
2308		struct mbuf *op_err;
2309		struct sctp_stale_cookie_msg *scm;
2310		uint32_t tim;
2311
2312		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2313		    0, M_DONTWAIT, 1, MT_DATA);
2314		if (op_err == NULL) {
2315			/* FOOBAR */
2316			return (NULL);
2317		}
2318		/* pre-reserve some space */
2319#ifdef INET6
2320		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
2321#else
2322		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
2323#endif
2324		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
2325		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
2326
2327		/* Set the len */
2328		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2329		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2330		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2331		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2332		    (sizeof(uint32_t))));
2333		/* seconds to usec */
2334		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2335		/* add in usec */
2336		if (tim == 0)
2337			tim = now.tv_usec - cookie->time_entered.tv_usec;
2338		scm->time_usec = htonl(tim);
2339		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2340		    vrf_id, port);
2341		return (NULL);
2342	}
2343	/*
2344	 * Now we must see with the lookup address if we have an existing
2345	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2346	 * and a INIT collided with us and somewhere the peer sent the
2347	 * cookie on another address besides the single address our assoc
2348	 * had for him. In this case we will have one of the tie-tags set at
2349	 * least AND the address field in the cookie can be used to look it
2350	 * up.
2351	 */
2352	to = NULL;
2353	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2354		memset(&sin6, 0, sizeof(sin6));
2355		sin6.sin6_family = AF_INET6;
2356		sin6.sin6_len = sizeof(sin6);
2357		sin6.sin6_port = sh->src_port;
2358		sin6.sin6_scope_id = cookie->scope_id;
2359		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2360		    sizeof(sin6.sin6_addr.s6_addr));
2361		to = (struct sockaddr *)&sin6;
2362	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2363		memset(&sin, 0, sizeof(sin));
2364		sin.sin_family = AF_INET;
2365		sin.sin_len = sizeof(sin);
2366		sin.sin_port = sh->src_port;
2367		sin.sin_addr.s_addr = cookie->address[0];
2368		to = (struct sockaddr *)&sin;
2369	} else {
2370		/* This should not happen */
2371		return (NULL);
2372	}
2373	if ((*stcb == NULL) && to) {
2374		/* Yep, lets check */
2375		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2376		if (*stcb == NULL) {
2377			/*
2378			 * We should have only got back the same inp. If we
2379			 * got back a different ep we have a problem. The
2380			 * original findep got back l_inp and now
2381			 */
2382			if (l_inp != *inp_p) {
2383				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2384			}
2385		} else {
2386			if (*locked_tcb == NULL) {
2387				/*
2388				 * In this case we found the assoc only
2389				 * after we locked the create lock. This
2390				 * means we are in a colliding case and we
2391				 * must make sure that we unlock the tcb if
2392				 * its one of the cases where we throw away
2393				 * the incoming packets.
2394				 */
2395				*locked_tcb = *stcb;
2396
2397				/*
2398				 * We must also increment the inp ref count
2399				 * since the ref_count flags was set when we
2400				 * did not find the TCB, now we found it
2401				 * which reduces the refcount.. we must
2402				 * raise it back out to balance it all :-)
2403				 */
2404				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2405				if ((*stcb)->sctp_ep != l_inp) {
2406					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2407					    (*stcb)->sctp_ep, l_inp);
2408				}
2409			}
2410		}
2411	}
2412	if (to == NULL)
2413		return (NULL);
2414
2415	cookie_len -= SCTP_SIGNATURE_SIZE;
2416	if (*stcb == NULL) {
2417		/* this is the "normal" case... get a new TCB */
2418		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2419		    cookie_len, *inp_p, netp, to, &notification,
2420		    auth_skipped, auth_offset, auth_len, vrf_id, port);
2421	} else {
2422		/* this is abnormal... cookie-echo on existing TCB */
2423		had_a_existing_tcb = 1;
2424		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2425		    cookie, cookie_len, *inp_p, *stcb, *netp, to,
2426		    &notification, &sac_restart_id, vrf_id);
2427	}
2428
2429	if (*stcb == NULL) {
2430		/* still no TCB... must be bad cookie-echo */
2431		return (NULL);
2432	}
2433	/*
2434	 * Ok, we built an association so confirm the address we sent the
2435	 * INIT-ACK to.
2436	 */
2437	netl = sctp_findnet(*stcb, to);
2438	/*
2439	 * This code should in theory NOT run but
2440	 */
2441	if (netl == NULL) {
2442		/* TSNH! Huh, why do I need to add this address here? */
2443		int ret;
2444
2445		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2446		    SCTP_IN_COOKIE_PROC);
2447		netl = sctp_findnet(*stcb, to);
2448	}
2449	if (netl) {
2450		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2451			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2452			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2453			    netl);
2454			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2455			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2456		}
2457	}
2458	if (*stcb) {
2459		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2460		    *stcb, NULL);
2461	}
2462	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2463		if (!had_a_existing_tcb ||
2464		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2465			/*
2466			 * If we have a NEW cookie or the connect never
2467			 * reached the connected state during collision we
2468			 * must do the TCP accept thing.
2469			 */
2470			struct socket *so, *oso;
2471			struct sctp_inpcb *inp;
2472
2473			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2474				/*
2475				 * For a restart we will keep the same
2476				 * socket, no need to do anything. I THINK!!
2477				 */
2478				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2479				return (m);
2480			}
2481			oso = (*inp_p)->sctp_socket;
2482			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2483			SCTP_TCB_UNLOCK((*stcb));
2484			so = sonewconn(oso, 0
2485			    );
2486			SCTP_TCB_LOCK((*stcb));
2487			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2488
2489			if (so == NULL) {
2490				struct mbuf *op_err;
2491
2492#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2493				struct socket *pcb_so;
2494
2495#endif
2496				/* Too many sockets */
2497				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2498				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2499				sctp_abort_association(*inp_p, NULL, m, iphlen,
2500				    sh, op_err, vrf_id, port);
2501#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2502				pcb_so = SCTP_INP_SO(*inp_p);
2503				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2504				SCTP_TCB_UNLOCK((*stcb));
2505				SCTP_SOCKET_LOCK(pcb_so, 1);
2506				SCTP_TCB_LOCK((*stcb));
2507				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2508#endif
2509				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2510#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2511				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2512#endif
2513				return (NULL);
2514			}
2515			inp = (struct sctp_inpcb *)so->so_pcb;
2516			SCTP_INP_INCR_REF(inp);
2517			/*
2518			 * We add the unbound flag here so that if we get an
2519			 * soabort() before we get the move_pcb done, we
2520			 * will properly cleanup.
2521			 */
2522			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2523			    SCTP_PCB_FLAGS_CONNECTED |
2524			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2525			    SCTP_PCB_FLAGS_UNBOUND |
2526			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2527			    SCTP_PCB_FLAGS_DONT_WAKE);
2528			inp->sctp_features = (*inp_p)->sctp_features;
2529			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2530			inp->sctp_socket = so;
2531			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2532			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2533			inp->sctp_context = (*inp_p)->sctp_context;
2534			inp->inp_starting_point_for_iterator = NULL;
2535			/*
2536			 * copy in the authentication parameters from the
2537			 * original endpoint
2538			 */
2539			if (inp->sctp_ep.local_hmacs)
2540				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2541			inp->sctp_ep.local_hmacs =
2542			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2543			if (inp->sctp_ep.local_auth_chunks)
2544				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2545			inp->sctp_ep.local_auth_chunks =
2546			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2547			(void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2548			    &inp->sctp_ep.shared_keys);
2549
2550			/*
2551			 * Now we must move it from one hash table to
2552			 * another and get the tcb in the right place.
2553			 */
2554			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2555
2556			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2557			SCTP_TCB_UNLOCK((*stcb));
2558
2559			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2560			    0);
2561			SCTP_TCB_LOCK((*stcb));
2562			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2563
2564
2565			/*
2566			 * now we must check to see if we were aborted while
2567			 * the move was going on and the lock/unlock
2568			 * happened.
2569			 */
2570			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2571				/*
2572				 * yep it was, we leave the assoc attached
2573				 * to the socket since the sctp_inpcb_free()
2574				 * call will send an abort for us.
2575				 */
2576				SCTP_INP_DECR_REF(inp);
2577				return (NULL);
2578			}
2579			SCTP_INP_DECR_REF(inp);
2580			/* Switch over to the new guy */
2581			*inp_p = inp;
2582			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2583
2584			/*
2585			 * Pull it from the incomplete queue and wake the
2586			 * guy
2587			 */
2588#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2589			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2590			SCTP_TCB_UNLOCK((*stcb));
2591			SCTP_SOCKET_LOCK(so, 1);
2592#endif
2593			soisconnected(so);
2594#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2595			SCTP_TCB_LOCK((*stcb));
2596			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2597			SCTP_SOCKET_UNLOCK(so, 1);
2598#endif
2599			return (m);
2600		}
2601	}
2602	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2603		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2604	}
2605	return (m);
2606}
2607
2608static void
2609sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2610    struct sctp_tcb *stcb, struct sctp_nets *net)
2611{
2612	/* cp must not be used, others call this without a c-ack :-) */
2613	struct sctp_association *asoc;
2614
2615	SCTPDBG(SCTP_DEBUG_INPUT2,
2616	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2617	if (stcb == NULL)
2618		return;
2619
2620	asoc = &stcb->asoc;
2621
2622	sctp_stop_all_cookie_timers(stcb);
2623	/* process according to association state */
2624	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2625		/* state change only needed when I am in right state */
2626		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2627		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2628		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2629			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2630			    stcb->sctp_ep, stcb, asoc->primary_destination);
2631
2632		}
2633		/* update RTO */
2634		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2635		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2636		if (asoc->overall_error_count == 0) {
2637			net->RTO = sctp_calculate_rto(stcb, asoc, net,
2638			    &asoc->time_entered, sctp_align_safe_nocopy);
2639		}
2640		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2641		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2642		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2643		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2644#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2645			struct socket *so;
2646
2647#endif
2648			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2649#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2650			so = SCTP_INP_SO(stcb->sctp_ep);
2651			atomic_add_int(&stcb->asoc.refcnt, 1);
2652			SCTP_TCB_UNLOCK(stcb);
2653			SCTP_SOCKET_LOCK(so, 1);
2654			SCTP_TCB_LOCK(stcb);
2655			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2656			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2657				SCTP_SOCKET_UNLOCK(so, 1);
2658				return;
2659			}
2660#endif
2661			soisconnected(stcb->sctp_socket);
2662#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2663			SCTP_SOCKET_UNLOCK(so, 1);
2664#endif
2665		}
2666		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2667		    stcb, net);
2668		/*
2669		 * since we did not send a HB make sure we don't double
2670		 * things
2671		 */
2672		net->hb_responded = 1;
2673
2674		if (stcb->asoc.sctp_autoclose_ticks &&
2675		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2676			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2677			    stcb->sctp_ep, stcb, NULL);
2678		}
2679		/*
2680		 * send ASCONF if parameters are pending and ASCONFs are
2681		 * allowed (eg. addresses changed when init/cookie echo were
2682		 * in flight)
2683		 */
2684		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2685		    (stcb->asoc.peer_supports_asconf) &&
2686		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2687#ifdef SCTP_TIMER_BASED_ASCONF
2688			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2689			    stcb->sctp_ep, stcb,
2690			    stcb->asoc.primary_destination);
2691#else
2692			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
2693			    SCTP_ADDR_NOT_LOCKED);
2694#endif
2695		}
2696	}
2697	/* Toss the cookie if I can */
2698	sctp_toss_old_cookies(stcb, asoc);
2699	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2700		/* Restart the timer if we have pending data */
2701		struct sctp_tmit_chunk *chk;
2702
2703		chk = TAILQ_FIRST(&asoc->sent_queue);
2704		if (chk) {
2705			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2706			    stcb, chk->whoTo);
2707		}
2708	}
2709}
2710
2711static void
2712sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2713    struct sctp_tcb *stcb)
2714{
2715	struct sctp_nets *net;
2716	struct sctp_tmit_chunk *lchk;
2717	uint32_t tsn;
2718
2719	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2720		return;
2721	}
2722	SCTP_STAT_INCR(sctps_recvecne);
2723	tsn = ntohl(cp->tsn);
2724	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2725	/* Also we make sure we disable the nonce_wait */
2726	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2727	if (lchk == NULL) {
2728		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2729	} else {
2730		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2731	}
2732	stcb->asoc.nonce_wait_for_ecne = 0;
2733	stcb->asoc.nonce_sum_check = 0;
2734
2735	/* Find where it was sent, if possible */
2736	net = NULL;
2737	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2738	while (lchk) {
2739		if (lchk->rec.data.TSN_seq == tsn) {
2740			net = lchk->whoTo;
2741			break;
2742		}
2743		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2744			break;
2745		lchk = TAILQ_NEXT(lchk, sctp_next);
2746	}
2747	if (net == NULL)
2748		/* default is we use the primary */
2749		net = stcb->asoc.primary_destination;
2750
2751	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2752		/*
2753		 * JRS - Use the congestion control given in the pluggable
2754		 * CC module
2755		 */
2756		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
2757		/*
2758		 * we reduce once every RTT. So we will only lower cwnd at
2759		 * the next sending seq i.e. the resync_tsn.
2760		 */
2761		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2762	}
2763	/*
2764	 * We always send a CWR this way if our previous one was lost our
2765	 * peer will get an update, or if it is not time again to reduce we
2766	 * still get the cwr to the peer.
2767	 */
2768	sctp_send_cwr(stcb, net, tsn);
2769}
2770
2771static void
2772sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2773{
2774	/*
2775	 * Here we get a CWR from the peer. We must look in the outqueue and
2776	 * make sure that we have a covered ECNE in teh control chunk part.
2777	 * If so remove it.
2778	 */
2779	struct sctp_tmit_chunk *chk;
2780	struct sctp_ecne_chunk *ecne;
2781
2782	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2783		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2784			continue;
2785		}
2786		/*
2787		 * Look for and remove if it is the right TSN. Since there
2788		 * is only ONE ECNE on the control queue at any one time we
2789		 * don't need to worry about more than one!
2790		 */
2791		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2792		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2793		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2794			/* this covers this ECNE, we can remove it */
2795			stcb->asoc.ecn_echo_cnt_onq--;
2796			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2797			    sctp_next);
2798			if (chk->data) {
2799				sctp_m_freem(chk->data);
2800				chk->data = NULL;
2801			}
2802			stcb->asoc.ctrl_queue_cnt--;
2803			sctp_free_a_chunk(stcb, chk);
2804			break;
2805		}
2806	}
2807}
2808
2809static void
2810sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2811    struct sctp_tcb *stcb, struct sctp_nets *net)
2812{
2813	struct sctp_association *asoc;
2814
2815#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2816	struct socket *so;
2817
2818#endif
2819
2820	SCTPDBG(SCTP_DEBUG_INPUT2,
2821	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2822	if (stcb == NULL)
2823		return;
2824
2825	asoc = &stcb->asoc;
2826	/* process according to association state */
2827	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2828		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
2829		SCTPDBG(SCTP_DEBUG_INPUT2,
2830		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
2831		SCTP_TCB_UNLOCK(stcb);
2832		return;
2833	}
2834	/* notify upper layer protocol */
2835	if (stcb->sctp_socket) {
2836		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2837		/* are the queues empty? they should be */
2838		if (!TAILQ_EMPTY(&asoc->send_queue) ||
2839		    !TAILQ_EMPTY(&asoc->sent_queue) ||
2840		    !TAILQ_EMPTY(&asoc->out_wheel)) {
2841			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
2842		}
2843	}
2844	/* stop the timer */
2845	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2846	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
2847	/* free the TCB */
2848	SCTPDBG(SCTP_DEBUG_INPUT2,
2849	    "sctp_handle_shutdown_complete: calls free-asoc\n");
2850#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2851	so = SCTP_INP_SO(stcb->sctp_ep);
2852	atomic_add_int(&stcb->asoc.refcnt, 1);
2853	SCTP_TCB_UNLOCK(stcb);
2854	SCTP_SOCKET_LOCK(so, 1);
2855	SCTP_TCB_LOCK(stcb);
2856	atomic_subtract_int(&stcb->asoc.refcnt, 1);
2857#endif
2858	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2859#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2860	SCTP_SOCKET_UNLOCK(so, 1);
2861#endif
2862	return;
2863}
2864
2865static int
2866process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2867    struct sctp_nets *net, uint8_t flg)
2868{
2869	switch (desc->chunk_type) {
2870		case SCTP_DATA:
2871		/* find the tsn to resend (possibly */
2872		{
2873			uint32_t tsn;
2874			struct sctp_tmit_chunk *tp1;
2875
2876			tsn = ntohl(desc->tsn_ifany);
2877			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2878			while (tp1) {
2879				if (tp1->rec.data.TSN_seq == tsn) {
2880					/* found it */
2881					break;
2882				}
2883				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2884				    MAX_TSN)) {
2885					/* not found */
2886					tp1 = NULL;
2887					break;
2888				}
2889				tp1 = TAILQ_NEXT(tp1, sctp_next);
2890			}
2891			if (tp1 == NULL) {
2892				/*
2893				 * Do it the other way , aka without paying
2894				 * attention to queue seq order.
2895				 */
2896				SCTP_STAT_INCR(sctps_pdrpdnfnd);
2897				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2898				while (tp1) {
2899					if (tp1->rec.data.TSN_seq == tsn) {
2900						/* found it */
2901						break;
2902					}
2903					tp1 = TAILQ_NEXT(tp1, sctp_next);
2904				}
2905			}
2906			if (tp1 == NULL) {
2907				SCTP_STAT_INCR(sctps_pdrptsnnf);
2908			}
2909			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
2910				uint8_t *ddp;
2911
2912				if ((stcb->asoc.peers_rwnd == 0) &&
2913				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2914					SCTP_STAT_INCR(sctps_pdrpdiwnp);
2915					return (0);
2916				}
2917				if (stcb->asoc.peers_rwnd == 0 &&
2918				    (flg & SCTP_FROM_MIDDLE_BOX)) {
2919					SCTP_STAT_INCR(sctps_pdrpdizrw);
2920					return (0);
2921				}
2922				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2923				    sizeof(struct sctp_data_chunk));
2924				{
2925					unsigned int iii;
2926
2927					for (iii = 0; iii < sizeof(desc->data_bytes);
2928					    iii++) {
2929						if (ddp[iii] != desc->data_bytes[iii]) {
2930							SCTP_STAT_INCR(sctps_pdrpbadd);
2931							return (-1);
2932						}
2933					}
2934				}
2935				/*
2936				 * We zero out the nonce so resync not
2937				 * needed
2938				 */
2939				tp1->rec.data.ect_nonce = 0;
2940
2941				if (tp1->do_rtt) {
2942					/*
2943					 * this guy had a RTO calculation
2944					 * pending on it, cancel it
2945					 */
2946					tp1->do_rtt = 0;
2947				}
2948				SCTP_STAT_INCR(sctps_pdrpmark);
2949				if (tp1->sent != SCTP_DATAGRAM_RESEND)
2950					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2951				tp1->sent = SCTP_DATAGRAM_RESEND;
2952				/*
2953				 * mark it as if we were doing a FR, since
2954				 * we will be getting gap ack reports behind
2955				 * the info from the router.
2956				 */
2957				tp1->rec.data.doing_fast_retransmit = 1;
2958				/*
2959				 * mark the tsn with what sequences can
2960				 * cause a new FR.
2961				 */
2962				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2963					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2964				} else {
2965					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2966				}
2967
2968				/* restart the timer */
2969				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2970				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2971				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2972				    stcb, tp1->whoTo);
2973
2974				/* fix counts and things */
2975				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2976					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2977					    tp1->whoTo->flight_size,
2978					    tp1->book_size,
2979					    (uintptr_t) stcb,
2980					    tp1->rec.data.TSN_seq);
2981				}
2982				sctp_flight_size_decrease(tp1);
2983				sctp_total_flight_decrease(stcb, tp1);
2984			} {
2985				/* audit code */
2986				unsigned int audit;
2987
2988				audit = 0;
2989				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2990					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2991						audit++;
2992				}
2993				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2994				    sctp_next) {
2995					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2996						audit++;
2997				}
2998				if (audit != stcb->asoc.sent_queue_retran_cnt) {
2999					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
3000					    audit, stcb->asoc.sent_queue_retran_cnt);
3001#ifndef SCTP_AUDITING_ENABLED
3002					stcb->asoc.sent_queue_retran_cnt = audit;
3003#endif
3004				}
3005			}
3006		}
3007		break;
3008	case SCTP_ASCONF:
3009		{
3010			struct sctp_tmit_chunk *asconf;
3011
3012			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
3013			    sctp_next) {
3014				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
3015					break;
3016				}
3017			}
3018			if (asconf) {
3019				if (asconf->sent != SCTP_DATAGRAM_RESEND)
3020					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3021				asconf->sent = SCTP_DATAGRAM_RESEND;
3022				asconf->snd_count--;
3023			}
3024		}
3025		break;
3026	case SCTP_INITIATION:
3027		/* resend the INIT */
3028		stcb->asoc.dropped_special_cnt++;
3029		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
3030			/*
3031			 * If we can get it in, in a few attempts we do
3032			 * this, otherwise we let the timer fire.
3033			 */
3034			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
3035			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
3036			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
3037		}
3038		break;
3039	case SCTP_SELECTIVE_ACK:
3040		/* resend the sack */
3041		sctp_send_sack(stcb);
3042		break;
3043	case SCTP_HEARTBEAT_REQUEST:
3044		/* resend a demand HB */
3045		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
3046			/*
3047			 * Only retransmit if we KNOW we wont destroy the
3048			 * tcb
3049			 */
3050			(void)sctp_send_hb(stcb, 1, net);
3051		}
3052		break;
3053	case SCTP_SHUTDOWN:
3054		sctp_send_shutdown(stcb, net);
3055		break;
3056	case SCTP_SHUTDOWN_ACK:
3057		sctp_send_shutdown_ack(stcb, net);
3058		break;
3059	case SCTP_COOKIE_ECHO:
3060		{
3061			struct sctp_tmit_chunk *cookie;
3062
3063			cookie = NULL;
3064			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
3065			    sctp_next) {
3066				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
3067					break;
3068				}
3069			}
3070			if (cookie) {
3071				if (cookie->sent != SCTP_DATAGRAM_RESEND)
3072					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3073				cookie->sent = SCTP_DATAGRAM_RESEND;
3074				sctp_stop_all_cookie_timers(stcb);
3075			}
3076		}
3077		break;
3078	case SCTP_COOKIE_ACK:
3079		sctp_send_cookie_ack(stcb);
3080		break;
3081	case SCTP_ASCONF_ACK:
3082		/* resend last asconf ack */
3083		sctp_send_asconf_ack(stcb);
3084		break;
3085	case SCTP_FORWARD_CUM_TSN:
3086		send_forward_tsn(stcb, &stcb->asoc);
3087		break;
3088		/* can't do anything with these */
3089	case SCTP_PACKET_DROPPED:
3090	case SCTP_INITIATION_ACK:	/* this should not happen */
3091	case SCTP_HEARTBEAT_ACK:
3092	case SCTP_ABORT_ASSOCIATION:
3093	case SCTP_OPERATION_ERROR:
3094	case SCTP_SHUTDOWN_COMPLETE:
3095	case SCTP_ECN_ECHO:
3096	case SCTP_ECN_CWR:
3097	default:
3098		break;
3099	}
3100	return (0);
3101}
3102
3103void
3104sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3105{
3106	int i;
3107	uint16_t temp;
3108
3109	/*
3110	 * We set things to 0xffff since this is the last delivered sequence
3111	 * and we will be sending in 0 after the reset.
3112	 */
3113
3114	if (number_entries) {
3115		for (i = 0; i < number_entries; i++) {
3116			temp = ntohs(list[i]);
3117			if (temp >= stcb->asoc.streamincnt) {
3118				continue;
3119			}
3120			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3121		}
3122	} else {
3123		list = NULL;
3124		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3125			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3126		}
3127	}
3128	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3129}
3130
3131static void
3132sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3133{
3134	int i;
3135
3136	if (number_entries == 0) {
3137		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3138			stcb->asoc.strmout[i].next_sequence_sent = 0;
3139		}
3140	} else if (number_entries) {
3141		for (i = 0; i < number_entries; i++) {
3142			uint16_t temp;
3143
3144			temp = ntohs(list[i]);
3145			if (temp >= stcb->asoc.streamoutcnt) {
3146				/* no such stream */
3147				continue;
3148			}
3149			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3150		}
3151	}
3152	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3153}
3154
3155
3156struct sctp_stream_reset_out_request *
3157sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3158{
3159	struct sctp_association *asoc;
3160	struct sctp_stream_reset_out_req *req;
3161	struct sctp_stream_reset_out_request *r;
3162	struct sctp_tmit_chunk *chk;
3163	int len, clen;
3164
3165	asoc = &stcb->asoc;
3166	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3167		asoc->stream_reset_outstanding = 0;
3168		return (NULL);
3169	}
3170	if (stcb->asoc.str_reset == NULL) {
3171		asoc->stream_reset_outstanding = 0;
3172		return (NULL);
3173	}
3174	chk = stcb->asoc.str_reset;
3175	if (chk->data == NULL) {
3176		return (NULL);
3177	}
3178	if (bchk) {
3179		/* he wants a copy of the chk pointer */
3180		*bchk = chk;
3181	}
3182	clen = chk->send_size;
3183	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
3184	r = &req->sr_req;
3185	if (ntohl(r->request_seq) == seq) {
3186		/* found it */
3187		return (r);
3188	}
3189	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3190	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3191		/* move to the next one, there can only be a max of two */
3192		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3193		if (ntohl(r->request_seq) == seq) {
3194			return (r);
3195		}
3196	}
3197	/* that seq is not here */
3198	return (NULL);
3199}
3200
3201static void
3202sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3203{
3204	struct sctp_association *asoc;
3205	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3206
3207	if (stcb->asoc.str_reset == NULL) {
3208		return;
3209	}
3210	asoc = &stcb->asoc;
3211
3212	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3213	TAILQ_REMOVE(&asoc->control_send_queue,
3214	    chk,
3215	    sctp_next);
3216	if (chk->data) {
3217		sctp_m_freem(chk->data);
3218		chk->data = NULL;
3219	}
3220	asoc->ctrl_queue_cnt--;
3221	sctp_free_a_chunk(stcb, chk);
3222	/* sa_ignore NO_NULL_CHK */
3223	stcb->asoc.str_reset = NULL;
3224}
3225
3226
3227static int
3228sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3229    uint32_t seq, uint32_t action,
3230    struct sctp_stream_reset_response *respin)
3231{
3232	uint16_t type;
3233	int lparm_len;
3234	struct sctp_association *asoc = &stcb->asoc;
3235	struct sctp_tmit_chunk *chk;
3236	struct sctp_stream_reset_out_request *srparam;
3237	int number_entries;
3238
3239	if (asoc->stream_reset_outstanding == 0) {
3240		/* duplicate */
3241		return (0);
3242	}
3243	if (seq == stcb->asoc.str_reset_seq_out) {
3244		srparam = sctp_find_stream_reset(stcb, seq, &chk);
3245		if (srparam) {
3246			stcb->asoc.str_reset_seq_out++;
3247			type = ntohs(srparam->ph.param_type);
3248			lparm_len = ntohs(srparam->ph.param_length);
3249			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3250				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3251				asoc->stream_reset_out_is_outstanding = 0;
3252				if (asoc->stream_reset_outstanding)
3253					asoc->stream_reset_outstanding--;
3254				if (action == SCTP_STREAM_RESET_PERFORMED) {
3255					/* do it */
3256					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
3257				} else {
3258					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3259				}
3260			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3261				/* Answered my request */
3262				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3263				if (asoc->stream_reset_outstanding)
3264					asoc->stream_reset_outstanding--;
3265				if (action != SCTP_STREAM_RESET_PERFORMED) {
3266					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3267				}
3268			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3269				/**
3270				 * a) Adopt the new in tsn.
3271				 * b) reset the map
3272				 * c) Adopt the new out-tsn
3273				 */
3274				struct sctp_stream_reset_response_tsn *resp;
3275				struct sctp_forward_tsn_chunk fwdtsn;
3276				int abort_flag = 0;
3277
3278				if (respin == NULL) {
3279					/* huh ? */
3280					return (0);
3281				}
3282				if (action == SCTP_STREAM_RESET_PERFORMED) {
3283					resp = (struct sctp_stream_reset_response_tsn *)respin;
3284					asoc->stream_reset_outstanding--;
3285					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3286					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3287					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3288					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3289					if (abort_flag) {
3290						return (1);
3291					}
3292					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3293					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3294						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3295					}
3296					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3297					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3298					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3299					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3300					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3301
3302					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3303					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3304
3305				}
3306			}
3307			/* get rid of the request and get the request flags */
3308			if (asoc->stream_reset_outstanding == 0) {
3309				sctp_clean_up_stream_reset(stcb);
3310			}
3311		}
3312	}
3313	return (0);
3314}
3315
3316static void
3317sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3318    struct sctp_tmit_chunk *chk,
3319    struct sctp_stream_reset_in_request *req, int trunc)
3320{
3321	uint32_t seq;
3322	int len, i;
3323	int number_entries;
3324	uint16_t temp;
3325
3326	/*
3327	 * peer wants me to send a str-reset to him for my outgoing seq's if
3328	 * seq_in is right.
3329	 */
3330	struct sctp_association *asoc = &stcb->asoc;
3331
3332	seq = ntohl(req->request_seq);
3333	if (asoc->str_reset_seq_in == seq) {
3334		if (trunc) {
3335			/* Can't do it, since they exceeded our buffer size  */
3336			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3337			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3338			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3339		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3340			len = ntohs(req->ph.param_length);
3341			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3342			for (i = 0; i < number_entries; i++) {
3343				temp = ntohs(req->list_of_streams[i]);
3344				req->list_of_streams[i] = temp;
3345			}
3346			/* move the reset action back one */
3347			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3348			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3349			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
3350			    asoc->str_reset_seq_out,
3351			    seq, (asoc->sending_seq - 1));
3352			asoc->stream_reset_out_is_outstanding = 1;
3353			asoc->str_reset = chk;
3354			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
3355			stcb->asoc.stream_reset_outstanding++;
3356		} else {
3357			/* Can't do it, since we have sent one out */
3358			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3359			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
3360			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3361		}
3362		asoc->str_reset_seq_in++;
3363	} else if (asoc->str_reset_seq_in - 1 == seq) {
3364		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3365	} else if (asoc->str_reset_seq_in - 2 == seq) {
3366		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3367	} else {
3368		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3369	}
3370}
3371
3372static int
3373sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
3374    struct sctp_tmit_chunk *chk,
3375    struct sctp_stream_reset_tsn_request *req)
3376{
3377	/* reset all in and out and update the tsn */
3378	/*
3379	 * A) reset my str-seq's on in and out. B) Select a receive next,
3380	 * and set cum-ack to it. Also process this selected number as a
3381	 * fwd-tsn as well. C) set in the response my next sending seq.
3382	 */
3383	struct sctp_forward_tsn_chunk fwdtsn;
3384	struct sctp_association *asoc = &stcb->asoc;
3385	int abort_flag = 0;
3386	uint32_t seq;
3387
3388	seq = ntohl(req->request_seq);
3389	if (asoc->str_reset_seq_in == seq) {
3390		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3391		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3392		fwdtsn.ch.chunk_flags = 0;
3393		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
3394		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3395		if (abort_flag) {
3396			return (1);
3397		}
3398		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
3399		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
3400			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
3401		}
3402		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3403		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
3404		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3405		atomic_add_int(&stcb->asoc.sending_seq, 1);
3406		/* save off historical data for retrans */
3407		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
3408		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
3409		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
3410		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
3411
3412		sctp_add_stream_reset_result_tsn(chk,
3413		    ntohl(req->request_seq),
3414		    SCTP_STREAM_RESET_PERFORMED,
3415		    stcb->asoc.sending_seq,
3416		    stcb->asoc.mapping_array_base_tsn);
3417		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3418		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3419		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3420		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3421
3422		asoc->str_reset_seq_in++;
3423	} else if (asoc->str_reset_seq_in - 1 == seq) {
3424		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
3425		    stcb->asoc.last_sending_seq[0],
3426		    stcb->asoc.last_base_tsnsent[0]
3427		    );
3428	} else if (asoc->str_reset_seq_in - 2 == seq) {
3429		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
3430		    stcb->asoc.last_sending_seq[1],
3431		    stcb->asoc.last_base_tsnsent[1]
3432		    );
3433	} else {
3434		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3435	}
3436	return (0);
3437}
3438
3439static void
3440sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3441    struct sctp_tmit_chunk *chk,
3442    struct sctp_stream_reset_out_request *req, int trunc)
3443{
3444	uint32_t seq, tsn;
3445	int number_entries, len;
3446	struct sctp_association *asoc = &stcb->asoc;
3447
3448	seq = ntohl(req->request_seq);
3449
3450	/* now if its not a duplicate we process it */
3451	if (asoc->str_reset_seq_in == seq) {
3452		len = ntohs(req->ph.param_length);
3453		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3454		/*
3455		 * the sender is resetting, handle the list issue.. we must
3456		 * a) verify if we can do the reset, if so no problem b) If
3457		 * we can't do the reset we must copy the request. c) queue
3458		 * it, and setup the data in processor to trigger it off
3459		 * when needed and dequeue all the queued data.
3460		 */
3461		tsn = ntohl(req->send_reset_at_tsn);
3462
3463		/* move the reset action back one */
3464		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3465		if (trunc) {
3466			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3467			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3468		} else if ((tsn == asoc->cumulative_tsn) ||
3469		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3470			/* we can do it now */
3471			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3472			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3473			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3474		} else {
3475			/*
3476			 * we must queue it up and thus wait for the TSN's
3477			 * to arrive that are at or before tsn
3478			 */
3479			struct sctp_stream_reset_list *liste;
3480			int siz;
3481
3482			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3483			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3484			    siz, SCTP_M_STRESET);
3485			if (liste == NULL) {
3486				/* gak out of memory */
3487				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3488				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3489				return;
3490			}
3491			liste->tsn = tsn;
3492			liste->number_entries = number_entries;
3493			memcpy(&liste->req, req,
3494			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3495			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3496			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3497			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3498		}
3499		asoc->str_reset_seq_in++;
3500	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3501		/*
3502		 * one seq back, just echo back last action since my
3503		 * response was lost.
3504		 */
3505		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3506	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3507		/*
3508		 * two seq back, just echo back last action since my
3509		 * response was lost.
3510		 */
3511		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3512	} else {
3513		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3514	}
3515}
3516
3517#ifdef __GNUC__
3518__attribute__((noinline))
3519#endif
3520	static int
3521	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
3522        struct sctp_stream_reset_out_req *sr_req)
3523{
3524	int chk_length, param_len, ptype;
3525	struct sctp_paramhdr pstore;
3526	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
3527
3528	uint32_t seq;
3529	int num_req = 0;
3530	int trunc = 0;
3531	struct sctp_tmit_chunk *chk;
3532	struct sctp_chunkhdr *ch;
3533	struct sctp_paramhdr *ph;
3534	int ret_code = 0;
3535	int num_param = 0;
3536
3537	/* now it may be a reset or a reset-response */
3538	chk_length = ntohs(sr_req->ch.chunk_length);
3539
3540	/* setup for adding the response */
3541	sctp_alloc_a_chunk(stcb, chk);
3542	if (chk == NULL) {
3543		return (ret_code);
3544	}
3545	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3546	chk->rec.chunk_id.can_take_data = 0;
3547	chk->asoc = &stcb->asoc;
3548	chk->no_fr_allowed = 0;
3549	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3550	chk->book_size_scale = 0;
3551	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3552	if (chk->data == NULL) {
3553strres_nochunk:
3554		if (chk->data) {
3555			sctp_m_freem(chk->data);
3556			chk->data = NULL;
3557		}
3558		sctp_free_a_chunk(stcb, chk);
3559		return (ret_code);
3560	}
3561	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3562
3563	/* setup chunk parameters */
3564	chk->sent = SCTP_DATAGRAM_UNSENT;
3565	chk->snd_count = 0;
3566	chk->whoTo = stcb->asoc.primary_destination;
3567	atomic_add_int(&chk->whoTo->ref_count, 1);
3568
3569	ch = mtod(chk->data, struct sctp_chunkhdr *);
3570	ch->chunk_type = SCTP_STREAM_RESET;
3571	ch->chunk_flags = 0;
3572	ch->chunk_length = htons(chk->send_size);
3573	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3574	offset += sizeof(struct sctp_chunkhdr);
3575	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3576		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
3577		if (ph == NULL)
3578			break;
3579		param_len = ntohs(ph->param_length);
3580		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3581			/* bad param */
3582			break;
3583		}
3584		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
3585		    (uint8_t *) & cstore);
3586		ptype = ntohs(ph->param_type);
3587		num_param++;
3588		if (param_len > (int)sizeof(cstore)) {
3589			trunc = 1;
3590		} else {
3591			trunc = 0;
3592		}
3593
3594		if (num_param > SCTP_MAX_RESET_PARAMS) {
3595			/* hit the max of parameters already sorry.. */
3596			break;
3597		}
3598		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3599			struct sctp_stream_reset_out_request *req_out;
3600
3601			req_out = (struct sctp_stream_reset_out_request *)ph;
3602			num_req++;
3603			if (stcb->asoc.stream_reset_outstanding) {
3604				seq = ntohl(req_out->response_seq);
3605				if (seq == stcb->asoc.str_reset_seq_out) {
3606					/* implicit ack */
3607					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3608				}
3609			}
3610			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
3611		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3612			struct sctp_stream_reset_in_request *req_in;
3613
3614			num_req++;
3615
3616			req_in = (struct sctp_stream_reset_in_request *)ph;
3617
3618			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
3619		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3620			struct sctp_stream_reset_tsn_request *req_tsn;
3621
3622			num_req++;
3623			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3624
3625			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3626				ret_code = 1;
3627				goto strres_nochunk;
3628			}
3629			/* no more */
3630			break;
3631		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3632			struct sctp_stream_reset_response *resp;
3633			uint32_t result;
3634
3635			resp = (struct sctp_stream_reset_response *)ph;
3636			seq = ntohl(resp->response_seq);
3637			result = ntohl(resp->result);
3638			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3639				ret_code = 1;
3640				goto strres_nochunk;
3641			}
3642		} else {
3643			break;
3644		}
3645		offset += SCTP_SIZE32(param_len);
3646		chk_length -= SCTP_SIZE32(param_len);
3647	}
3648	if (num_req == 0) {
3649		/* we have no response free the stuff */
3650		goto strres_nochunk;
3651	}
3652	/* ok we have a chunk to link in */
3653	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3654	    chk,
3655	    sctp_next);
3656	stcb->asoc.ctrl_queue_cnt++;
3657	return (ret_code);
3658}
3659
3660/*
3661 * Handle a router or endpoints report of a packet loss, there are two ways
3662 * to handle this, either we get the whole packet and must disect it
3663 * ourselves (possibly with truncation and or corruption) or it is a summary
3664 * from a middle box that did the disectting for us.
3665 */
3666static void
3667sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3668    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3669{
3670	uint32_t bottle_bw, on_queue;
3671	uint16_t trunc_len;
3672	unsigned int chlen;
3673	unsigned int at;
3674	struct sctp_chunk_desc desc;
3675	struct sctp_chunkhdr *ch;
3676
3677	chlen = ntohs(cp->ch.chunk_length);
3678	chlen -= sizeof(struct sctp_pktdrop_chunk);
3679	/* XXX possible chlen underflow */
3680	if (chlen == 0) {
3681		ch = NULL;
3682		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3683			SCTP_STAT_INCR(sctps_pdrpbwrpt);
3684	} else {
3685		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3686		chlen -= sizeof(struct sctphdr);
3687		/* XXX possible chlen underflow */
3688		memset(&desc, 0, sizeof(desc));
3689	}
3690	trunc_len = (uint16_t) ntohs(cp->trunc_len);
3691	if (trunc_len > limit) {
3692		trunc_len = limit;
3693	}
3694	/* now the chunks themselves */
3695	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3696		desc.chunk_type = ch->chunk_type;
3697		/* get amount we need to move */
3698		at = ntohs(ch->chunk_length);
3699		if (at < sizeof(struct sctp_chunkhdr)) {
3700			/* corrupt chunk, maybe at the end? */
3701			SCTP_STAT_INCR(sctps_pdrpcrupt);
3702			break;
3703		}
3704		if (trunc_len == 0) {
3705			/* we are supposed to have all of it */
3706			if (at > chlen) {
3707				/* corrupt skip it */
3708				SCTP_STAT_INCR(sctps_pdrpcrupt);
3709				break;
3710			}
3711		} else {
3712			/* is there enough of it left ? */
3713			if (desc.chunk_type == SCTP_DATA) {
3714				if (chlen < (sizeof(struct sctp_data_chunk) +
3715				    sizeof(desc.data_bytes))) {
3716					break;
3717				}
3718			} else {
3719				if (chlen < sizeof(struct sctp_chunkhdr)) {
3720					break;
3721				}
3722			}
3723		}
3724		if (desc.chunk_type == SCTP_DATA) {
3725			/* can we get out the tsn? */
3726			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3727				SCTP_STAT_INCR(sctps_pdrpmbda);
3728
3729			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3730				/* yep */
3731				struct sctp_data_chunk *dcp;
3732				uint8_t *ddp;
3733				unsigned int iii;
3734
3735				dcp = (struct sctp_data_chunk *)ch;
3736				ddp = (uint8_t *) (dcp + 1);
3737				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3738					desc.data_bytes[iii] = ddp[iii];
3739				}
3740				desc.tsn_ifany = dcp->dp.tsn;
3741			} else {
3742				/* nope we are done. */
3743				SCTP_STAT_INCR(sctps_pdrpnedat);
3744				break;
3745			}
3746		} else {
3747			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3748				SCTP_STAT_INCR(sctps_pdrpmbct);
3749		}
3750
3751		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3752			SCTP_STAT_INCR(sctps_pdrppdbrk);
3753			break;
3754		}
3755		if (SCTP_SIZE32(at) > chlen) {
3756			break;
3757		}
3758		chlen -= SCTP_SIZE32(at);
3759		if (chlen < sizeof(struct sctp_chunkhdr)) {
3760			/* done, none left */
3761			break;
3762		}
3763		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3764	}
3765	/* Now update any rwnd --- possibly */
3766	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3767		/* From a peer, we get a rwnd report */
3768		uint32_t a_rwnd;
3769
3770		SCTP_STAT_INCR(sctps_pdrpfehos);
3771
3772		bottle_bw = ntohl(cp->bottle_bw);
3773		on_queue = ntohl(cp->current_onq);
3774		if (bottle_bw && on_queue) {
3775			/* a rwnd report is in here */
3776			if (bottle_bw > on_queue)
3777				a_rwnd = bottle_bw - on_queue;
3778			else
3779				a_rwnd = 0;
3780
3781			if (a_rwnd == 0)
3782				stcb->asoc.peers_rwnd = 0;
3783			else {
3784				if (a_rwnd > stcb->asoc.total_flight) {
3785					stcb->asoc.peers_rwnd =
3786					    a_rwnd - stcb->asoc.total_flight;
3787				} else {
3788					stcb->asoc.peers_rwnd = 0;
3789				}
3790				if (stcb->asoc.peers_rwnd <
3791				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3792					/* SWS sender side engages */
3793					stcb->asoc.peers_rwnd = 0;
3794				}
3795			}
3796		}
3797	} else {
3798		SCTP_STAT_INCR(sctps_pdrpfmbox);
3799	}
3800
3801	/* now middle boxes in sat networks get a cwnd bump */
3802	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3803	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
3804	    (stcb->asoc.sat_network)) {
3805		/*
3806		 * This is debateable but for sat networks it makes sense
3807		 * Note if a T3 timer has went off, we will prohibit any
3808		 * changes to cwnd until we exit the t3 loss recovery.
3809		 */
3810		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
3811		    net, cp, &bottle_bw, &on_queue);
3812	}
3813}
3814
3815/*
3816 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
3817 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
3818 * offset: offset into the mbuf chain to first chunkhdr - length: is the
3819 * length of the complete packet outputs: - length: modified to remaining
3820 * length after control processing - netp: modified to new sctp_nets after
3821 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
3822 * bad packet,...) otherwise return the tcb for this packet
3823 */
3824#ifdef __GNUC__
3825__attribute__((noinline))
3826#endif
3827	static struct sctp_tcb *
3828	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3829             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3830             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3831             uint32_t vrf_id, uint16_t port)
3832{
3833	struct sctp_association *asoc;
3834	uint32_t vtag_in;
3835	int num_chunks = 0;	/* number of control chunks processed */
3836	uint32_t chk_length;
3837	int ret;
3838	int abort_no_unlock = 0;
3839
3840	/*
3841	 * How big should this be, and should it be alloc'd? Lets try the
3842	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3843	 * until we get into jumbo grams and such..
3844	 */
3845	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3846	struct sctp_tcb *locked_tcb = stcb;
3847	int got_auth = 0;
3848	uint32_t auth_offset = 0, auth_len = 0;
3849	int auth_skipped = 0;
3850	int asconf_cnt = 0;
3851
3852#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3853	struct socket *so;
3854
3855#endif
3856
3857	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3858	    iphlen, *offset, length, stcb);
3859
3860	/* validate chunk header length... */
3861	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3862		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3863		    ntohs(ch->chunk_length));
3864		if (locked_tcb) {
3865			SCTP_TCB_UNLOCK(locked_tcb);
3866		}
3867		return (NULL);
3868	}
3869	/*
3870	 * validate the verification tag
3871	 */
3872	vtag_in = ntohl(sh->v_tag);
3873
3874	if (locked_tcb) {
3875		SCTP_TCB_LOCK_ASSERT(locked_tcb);
3876	}
3877	if (ch->chunk_type == SCTP_INITIATION) {
3878		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
3879		    ntohs(ch->chunk_length), vtag_in);
3880		if (vtag_in != 0) {
3881			/* protocol error- silently discard... */
3882			SCTP_STAT_INCR(sctps_badvtag);
3883			if (locked_tcb) {
3884				SCTP_TCB_UNLOCK(locked_tcb);
3885			}
3886			return (NULL);
3887		}
3888	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3889		/*
3890		 * If there is no stcb, skip the AUTH chunk and process
3891		 * later after a stcb is found (to validate the lookup was
3892		 * valid.
3893		 */
3894		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3895		    (stcb == NULL) &&
3896		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
3897			/* save this chunk for later processing */
3898			auth_skipped = 1;
3899			auth_offset = *offset;
3900			auth_len = ntohs(ch->chunk_length);
3901
3902			/* (temporarily) move past this chunk */
3903			*offset += SCTP_SIZE32(auth_len);
3904			if (*offset >= length) {
3905				/* no more data left in the mbuf chain */
3906				*offset = length;
3907				if (locked_tcb) {
3908					SCTP_TCB_UNLOCK(locked_tcb);
3909				}
3910				return (NULL);
3911			}
3912			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3913			    sizeof(struct sctp_chunkhdr), chunk_buf);
3914		}
3915		if (ch == NULL) {
3916			/* Help */
3917			*offset = length;
3918			if (locked_tcb) {
3919				SCTP_TCB_UNLOCK(locked_tcb);
3920			}
3921			return (NULL);
3922		}
3923		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3924			goto process_control_chunks;
3925		}
3926		/*
3927		 * first check if it's an ASCONF with an unknown src addr we
3928		 * need to look inside to find the association
3929		 */
3930		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3931			struct sctp_chunkhdr *asconf_ch = ch;
3932			uint32_t asconf_offset = 0, asconf_len = 0;
3933
3934			/* inp's refcount may be reduced */
3935			SCTP_INP_INCR_REF(inp);
3936
3937			asconf_offset = *offset;
3938			do {
3939				asconf_len = ntohs(asconf_ch->chunk_length);
3940				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
3941					break;
3942				stcb = sctp_findassociation_ep_asconf(m, iphlen,
3943				    *offset, sh, &inp, netp);
3944				if (stcb != NULL)
3945					break;
3946				asconf_offset += SCTP_SIZE32(asconf_len);
3947				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
3948				    sizeof(struct sctp_chunkhdr), chunk_buf);
3949			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
3950			if (stcb == NULL) {
3951				/*
3952				 * reduce inp's refcount if not reduced in
3953				 * sctp_findassociation_ep_asconf().
3954				 */
3955				SCTP_INP_DECR_REF(inp);
3956			} else {
3957				locked_tcb = stcb;
3958			}
3959
3960			/* now go back and verify any auth chunk to be sure */
3961			if (auth_skipped && (stcb != NULL)) {
3962				struct sctp_auth_chunk *auth;
3963
3964				auth = (struct sctp_auth_chunk *)
3965				    sctp_m_getptr(m, auth_offset,
3966				    auth_len, chunk_buf);
3967				got_auth = 1;
3968				auth_skipped = 0;
3969				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3970				    auth_offset)) {
3971					/* auth HMAC failed so dump it */
3972					*offset = length;
3973					if (locked_tcb) {
3974						SCTP_TCB_UNLOCK(locked_tcb);
3975					}
3976					return (NULL);
3977				} else {
3978					/* remaining chunks are HMAC checked */
3979					stcb->asoc.authenticated = 1;
3980				}
3981			}
3982		}
3983		if (stcb == NULL) {
3984			/* no association, so it's out of the blue... */
3985			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3986			    vrf_id, port);
3987			*offset = length;
3988			if (locked_tcb) {
3989				SCTP_TCB_UNLOCK(locked_tcb);
3990			}
3991			return (NULL);
3992		}
3993		asoc = &stcb->asoc;
3994		/* ABORT and SHUTDOWN can use either v_tag... */
3995		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3996		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3997		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
3998			if ((vtag_in == asoc->my_vtag) ||
3999			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4000			    (vtag_in == asoc->peer_vtag))) {
4001				/* this is valid */
4002			} else {
4003				/* drop this packet... */
4004				SCTP_STAT_INCR(sctps_badvtag);
4005				if (locked_tcb) {
4006					SCTP_TCB_UNLOCK(locked_tcb);
4007				}
4008				return (NULL);
4009			}
4010		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4011			if (vtag_in != asoc->my_vtag) {
4012				/*
4013				 * this could be a stale SHUTDOWN-ACK or the
4014				 * peer never got the SHUTDOWN-COMPLETE and
4015				 * is still hung; we have started a new asoc
4016				 * but it won't complete until the shutdown
4017				 * is completed
4018				 */
4019				if (locked_tcb) {
4020					SCTP_TCB_UNLOCK(locked_tcb);
4021				}
4022				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
4023				    NULL, vrf_id, port);
4024				return (NULL);
4025			}
4026		} else {
4027			/* for all other chunks, vtag must match */
4028			if (vtag_in != asoc->my_vtag) {
4029				/* invalid vtag... */
4030				SCTPDBG(SCTP_DEBUG_INPUT3,
4031				    "invalid vtag: %xh, expect %xh\n",
4032				    vtag_in, asoc->my_vtag);
4033				SCTP_STAT_INCR(sctps_badvtag);
4034				if (locked_tcb) {
4035					SCTP_TCB_UNLOCK(locked_tcb);
4036				}
4037				*offset = length;
4038				return (NULL);
4039			}
4040		}
4041	}			/* end if !SCTP_COOKIE_ECHO */
4042	/*
4043	 * process all control chunks...
4044	 */
4045	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4046	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4047	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4048		/* implied cookie-ack.. we must have lost the ack */
4049		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4050			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4051			    stcb->asoc.overall_error_count,
4052			    0,
4053			    SCTP_FROM_SCTP_INPUT,
4054			    __LINE__);
4055		}
4056		stcb->asoc.overall_error_count = 0;
4057		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4058		    *netp);
4059	}
4060process_control_chunks:
4061	while (IS_SCTP_CONTROL(ch)) {
4062		/* validate chunk length */
4063		chk_length = ntohs(ch->chunk_length);
4064		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4065		    ch->chunk_type, chk_length);
4066		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4067		if (chk_length < sizeof(*ch) ||
4068		    (*offset + (int)chk_length) > length) {
4069			*offset = length;
4070			if (locked_tcb) {
4071				SCTP_TCB_UNLOCK(locked_tcb);
4072			}
4073			return (NULL);
4074		}
4075		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4076		/*
4077		 * INIT-ACK only gets the init ack "header" portion only
4078		 * because we don't have to process the peer's COOKIE. All
4079		 * others get a complete chunk.
4080		 */
4081		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4082		    (ch->chunk_type == SCTP_INITIATION)) {
4083			/* get an init-ack chunk */
4084			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4085			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4086			if (ch == NULL) {
4087				*offset = length;
4088				if (locked_tcb) {
4089					SCTP_TCB_UNLOCK(locked_tcb);
4090				}
4091				return (NULL);
4092			}
4093		} else {
4094			/* For cookies and all other chunks. */
4095			if (chk_length > sizeof(chunk_buf)) {
4096				/*
4097				 * use just the size of the chunk buffer so
4098				 * the front part of our chunks fit in
4099				 * contiguous space up to the chunk buffer
4100				 * size (508 bytes). For chunks that need to
4101				 * get more than that they must use the
4102				 * sctp_m_getptr() function or other means
4103				 * (e.g. know how to parse mbuf chains).
4104				 * Cookies do this already.
4105				 */
4106				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4107				    (sizeof(chunk_buf) - 4),
4108				    chunk_buf);
4109				if (ch == NULL) {
4110					*offset = length;
4111					if (locked_tcb) {
4112						SCTP_TCB_UNLOCK(locked_tcb);
4113					}
4114					return (NULL);
4115				}
4116			} else {
4117				/* We can fit it all */
4118				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4119				    chk_length, chunk_buf);
4120				if (ch == NULL) {
4121					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4122					*offset = length;
4123					if (locked_tcb) {
4124						SCTP_TCB_UNLOCK(locked_tcb);
4125					}
4126					return (NULL);
4127				}
4128			}
4129		}
4130		num_chunks++;
4131		/* Save off the last place we got a control from */
4132		if (stcb != NULL) {
4133			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4134				/*
4135				 * allow last_control to be NULL if
4136				 * ASCONF... ASCONF processing will find the
4137				 * right net later
4138				 */
4139				if ((netp != NULL) && (*netp != NULL))
4140					stcb->asoc.last_control_chunk_from = *netp;
4141			}
4142		}
4143#ifdef SCTP_AUDITING_ENABLED
4144		sctp_audit_log(0xB0, ch->chunk_type);
4145#endif
4146
4147		/* check to see if this chunk required auth, but isn't */
4148		if ((stcb != NULL) &&
4149		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4150		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4151		    !stcb->asoc.authenticated) {
4152			/* "silently" ignore */
4153			SCTP_STAT_INCR(sctps_recvauthmissing);
4154			goto next_chunk;
4155		}
4156		switch (ch->chunk_type) {
4157		case SCTP_INITIATION:
4158			/* must be first and only chunk */
4159			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4160			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4161				/* We are not interested anymore? */
4162				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4163					/*
4164					 * collision case where we are
4165					 * sending to them too
4166					 */
4167					;
4168				} else {
4169					if (locked_tcb) {
4170						SCTP_TCB_UNLOCK(locked_tcb);
4171					}
4172					*offset = length;
4173					return (NULL);
4174				}
4175			}
4176			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4177			    (num_chunks > 1) ||
4178			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4179				*offset = length;
4180				if (locked_tcb) {
4181					SCTP_TCB_UNLOCK(locked_tcb);
4182				}
4183				return (NULL);
4184			}
4185			if ((stcb != NULL) &&
4186			    (SCTP_GET_STATE(&stcb->asoc) ==
4187			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4188				sctp_send_shutdown_ack(stcb,
4189				    stcb->asoc.primary_destination);
4190				*offset = length;
4191				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4192				if (locked_tcb) {
4193					SCTP_TCB_UNLOCK(locked_tcb);
4194				}
4195				return (NULL);
4196			}
4197			if (netp) {
4198				sctp_handle_init(m, iphlen, *offset, sh,
4199				    (struct sctp_init_chunk *)ch, inp,
4200				    stcb, *netp, &abort_no_unlock, vrf_id, port);
4201			}
4202			if (abort_no_unlock)
4203				return (NULL);
4204
4205			*offset = length;
4206			if (locked_tcb) {
4207				SCTP_TCB_UNLOCK(locked_tcb);
4208			}
4209			return (NULL);
4210			break;
4211		case SCTP_PAD_CHUNK:
4212			break;
4213		case SCTP_INITIATION_ACK:
4214			/* must be first and only chunk */
4215			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4216			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4217				/* We are not interested anymore */
4218				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4219					;
4220				} else {
4221					if (locked_tcb) {
4222						SCTP_TCB_UNLOCK(locked_tcb);
4223					}
4224					*offset = length;
4225					if (stcb) {
4226#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4227						so = SCTP_INP_SO(inp);
4228						atomic_add_int(&stcb->asoc.refcnt, 1);
4229						SCTP_TCB_UNLOCK(stcb);
4230						SCTP_SOCKET_LOCK(so, 1);
4231						SCTP_TCB_LOCK(stcb);
4232						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4233#endif
4234						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4235#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4236						SCTP_SOCKET_UNLOCK(so, 1);
4237#endif
4238					}
4239					return (NULL);
4240				}
4241			}
4242			if ((num_chunks > 1) ||
4243			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4244				*offset = length;
4245				if (locked_tcb) {
4246					SCTP_TCB_UNLOCK(locked_tcb);
4247				}
4248				return (NULL);
4249			}
4250			if ((netp) && (*netp)) {
4251				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4252				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4253			} else {
4254				ret = -1;
4255			}
4256			/*
4257			 * Special case, I must call the output routine to
4258			 * get the cookie echoed
4259			 */
4260			if (abort_no_unlock)
4261				return (NULL);
4262
4263			if ((stcb) && ret == 0)
4264				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4265			*offset = length;
4266			if (locked_tcb) {
4267				SCTP_TCB_UNLOCK(locked_tcb);
4268			}
4269			return (NULL);
4270			break;
4271		case SCTP_SELECTIVE_ACK:
4272			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4273			SCTP_STAT_INCR(sctps_recvsacks);
4274			{
4275				struct sctp_sack_chunk *sack;
4276				int abort_now = 0;
4277				uint32_t a_rwnd, cum_ack;
4278				uint16_t num_seg;
4279				int nonce_sum_flag;
4280
4281				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
4282					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
4283			ignore_sack:
4284					*offset = length;
4285					if (locked_tcb) {
4286						SCTP_TCB_UNLOCK(locked_tcb);
4287					}
4288					return (NULL);
4289				}
4290				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4291					/*-
4292					 * If we have sent a shutdown-ack, we will pay no
4293					 * attention to a sack sent in to us since
4294					 * we don't care anymore.
4295					 */
4296					goto ignore_sack;
4297				}
4298				sack = (struct sctp_sack_chunk *)ch;
4299				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
4300				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4301				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4302				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4303				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4304				    cum_ack,
4305				    num_seg,
4306				    a_rwnd
4307				    );
4308				stcb->asoc.seen_a_sack_this_pkt = 1;
4309				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4310				    (num_seg == 0) &&
4311				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4312				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4313				    (stcb->asoc.saw_sack_with_frags == 0) &&
4314				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4315				    ) {
4316					/*
4317					 * We have a SIMPLE sack having no
4318					 * prior segments and data on sent
4319					 * queue to be acked.. Use the
4320					 * faster path sack processing. We
4321					 * also allow window update sacks
4322					 * with no missing segments to go
4323					 * this way too.
4324					 */
4325					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4326					    &abort_now);
4327				} else {
4328					if (netp && *netp)
4329						sctp_handle_sack(m, *offset,
4330						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
4331				}
4332				if (abort_now) {
4333					/* ABORT signal from sack processing */
4334					*offset = length;
4335					return (NULL);
4336				}
4337			}
4338			break;
4339		case SCTP_HEARTBEAT_REQUEST:
4340			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4341			if ((stcb) && netp && *netp) {
4342				SCTP_STAT_INCR(sctps_recvheartbeat);
4343				sctp_send_heartbeat_ack(stcb, m, *offset,
4344				    chk_length, *netp);
4345
4346				/* He's alive so give him credit */
4347				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4348					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4349					    stcb->asoc.overall_error_count,
4350					    0,
4351					    SCTP_FROM_SCTP_INPUT,
4352					    __LINE__);
4353				}
4354				stcb->asoc.overall_error_count = 0;
4355			}
4356			break;
4357		case SCTP_HEARTBEAT_ACK:
4358			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4359			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4360				/* Its not ours */
4361				*offset = length;
4362				if (locked_tcb) {
4363					SCTP_TCB_UNLOCK(locked_tcb);
4364				}
4365				return (NULL);
4366			}
4367			/* He's alive so give him credit */
4368			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4369				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4370				    stcb->asoc.overall_error_count,
4371				    0,
4372				    SCTP_FROM_SCTP_INPUT,
4373				    __LINE__);
4374			}
4375			stcb->asoc.overall_error_count = 0;
4376			SCTP_STAT_INCR(sctps_recvheartbeatack);
4377			if (netp && *netp)
4378				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4379				    stcb, *netp);
4380			break;
4381		case SCTP_ABORT_ASSOCIATION:
4382			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4383			    stcb);
4384			if ((stcb) && netp && *netp)
4385				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4386				    stcb, *netp);
4387			*offset = length;
4388			return (NULL);
4389			break;
4390		case SCTP_SHUTDOWN:
4391			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4392			    stcb);
4393			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4394				*offset = length;
4395				if (locked_tcb) {
4396					SCTP_TCB_UNLOCK(locked_tcb);
4397				}
4398				return (NULL);
4399			}
4400			if (netp && *netp) {
4401				int abort_flag = 0;
4402
4403				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4404				    stcb, *netp, &abort_flag);
4405				if (abort_flag) {
4406					*offset = length;
4407					return (NULL);
4408				}
4409			}
4410			break;
4411		case SCTP_SHUTDOWN_ACK:
4412			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4413			if ((stcb) && (netp) && (*netp))
4414				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4415			*offset = length;
4416			return (NULL);
4417			break;
4418
4419		case SCTP_OPERATION_ERROR:
4420			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4421			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4422
4423				*offset = length;
4424				return (NULL);
4425			}
4426			break;
4427		case SCTP_COOKIE_ECHO:
4428			SCTPDBG(SCTP_DEBUG_INPUT3,
4429			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4430			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4431				;
4432			} else {
4433				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4434					/* We are not interested anymore */
4435					*offset = length;
4436					return (NULL);
4437				}
4438			}
4439			/*
4440			 * First are we accepting? We do this again here
4441			 * sincen it is possible that a previous endpoint
4442			 * WAS listening responded to a INIT-ACK and then
4443			 * closed. We opened and bound.. and are now no
4444			 * longer listening.
4445			 */
4446
4447			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
4448				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4449				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4450					struct mbuf *oper;
4451					struct sctp_paramhdr *phdr;
4452
4453					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4454					    0, M_DONTWAIT, 1, MT_DATA);
4455					if (oper) {
4456						SCTP_BUF_LEN(oper) =
4457						    sizeof(struct sctp_paramhdr);
4458						phdr = mtod(oper,
4459						    struct sctp_paramhdr *);
4460						phdr->param_type =
4461						    htons(SCTP_CAUSE_OUT_OF_RESC);
4462						phdr->param_length =
4463						    htons(sizeof(struct sctp_paramhdr));
4464					}
4465					sctp_abort_association(inp, stcb, m,
4466					    iphlen, sh, oper, vrf_id, port);
4467				}
4468				*offset = length;
4469				return (NULL);
4470			} else {
4471				struct mbuf *ret_buf;
4472				struct sctp_inpcb *linp;
4473
4474				if (stcb) {
4475					linp = NULL;
4476				} else {
4477					linp = inp;
4478				}
4479
4480				if (linp) {
4481					SCTP_ASOC_CREATE_LOCK(linp);
4482				}
4483				if (netp) {
4484					ret_buf =
4485					    sctp_handle_cookie_echo(m, iphlen,
4486					    *offset, sh,
4487					    (struct sctp_cookie_echo_chunk *)ch,
4488					    &inp, &stcb, netp,
4489					    auth_skipped,
4490					    auth_offset,
4491					    auth_len,
4492					    &locked_tcb,
4493					    vrf_id,
4494					    port);
4495				} else {
4496					ret_buf = NULL;
4497				}
4498				if (linp) {
4499					SCTP_ASOC_CREATE_UNLOCK(linp);
4500				}
4501				if (ret_buf == NULL) {
4502					if (locked_tcb) {
4503						SCTP_TCB_UNLOCK(locked_tcb);
4504					}
4505					SCTPDBG(SCTP_DEBUG_INPUT3,
4506					    "GAK, null buffer\n");
4507					auth_skipped = 0;
4508					*offset = length;
4509					return (NULL);
4510				}
4511				/* if AUTH skipped, see if it verified... */
4512				if (auth_skipped) {
4513					got_auth = 1;
4514					auth_skipped = 0;
4515				}
4516				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4517					/*
4518					 * Restart the timer if we have
4519					 * pending data
4520					 */
4521					struct sctp_tmit_chunk *chk;
4522
4523					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4524					if (chk) {
4525						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4526						    stcb->sctp_ep, stcb,
4527						    chk->whoTo);
4528					}
4529				}
4530			}
4531			break;
4532		case SCTP_COOKIE_ACK:
4533			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4534			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4535				if (locked_tcb) {
4536					SCTP_TCB_UNLOCK(locked_tcb);
4537				}
4538				return (NULL);
4539			}
4540			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4541				/* We are not interested anymore */
4542				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4543					;
4544				} else if (stcb) {
4545#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4546					so = SCTP_INP_SO(inp);
4547					atomic_add_int(&stcb->asoc.refcnt, 1);
4548					SCTP_TCB_UNLOCK(stcb);
4549					SCTP_SOCKET_LOCK(so, 1);
4550					SCTP_TCB_LOCK(stcb);
4551					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4552#endif
4553					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4554#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4555					SCTP_SOCKET_UNLOCK(so, 1);
4556#endif
4557					*offset = length;
4558					return (NULL);
4559				}
4560			}
4561			/* He's alive so give him credit */
4562			if ((stcb) && netp && *netp) {
4563				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4564					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4565					    stcb->asoc.overall_error_count,
4566					    0,
4567					    SCTP_FROM_SCTP_INPUT,
4568					    __LINE__);
4569				}
4570				stcb->asoc.overall_error_count = 0;
4571				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4572			}
4573			break;
4574		case SCTP_ECN_ECHO:
4575			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4576			/* He's alive so give him credit */
4577			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4578				/* Its not ours */
4579				if (locked_tcb) {
4580					SCTP_TCB_UNLOCK(locked_tcb);
4581				}
4582				*offset = length;
4583				return (NULL);
4584			}
4585			if (stcb) {
4586				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4587					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4588					    stcb->asoc.overall_error_count,
4589					    0,
4590					    SCTP_FROM_SCTP_INPUT,
4591					    __LINE__);
4592				}
4593				stcb->asoc.overall_error_count = 0;
4594				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4595				    stcb);
4596			}
4597			break;
4598		case SCTP_ECN_CWR:
4599			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4600			/* He's alive so give him credit */
4601			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4602				/* Its not ours */
4603				if (locked_tcb) {
4604					SCTP_TCB_UNLOCK(locked_tcb);
4605				}
4606				*offset = length;
4607				return (NULL);
4608			}
4609			if (stcb) {
4610				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4611					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4612					    stcb->asoc.overall_error_count,
4613					    0,
4614					    SCTP_FROM_SCTP_INPUT,
4615					    __LINE__);
4616				}
4617				stcb->asoc.overall_error_count = 0;
4618				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4619			}
4620			break;
4621		case SCTP_SHUTDOWN_COMPLETE:
4622			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4623			/* must be first and only chunk */
4624			if ((num_chunks > 1) ||
4625			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4626				*offset = length;
4627				if (locked_tcb) {
4628					SCTP_TCB_UNLOCK(locked_tcb);
4629				}
4630				return (NULL);
4631			}
4632			if ((stcb) && netp && *netp) {
4633				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4634				    stcb, *netp);
4635			}
4636			*offset = length;
4637			return (NULL);
4638			break;
4639		case SCTP_ASCONF:
4640			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4641			/* He's alive so give him credit */
4642			if (stcb) {
4643				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4644					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4645					    stcb->asoc.overall_error_count,
4646					    0,
4647					    SCTP_FROM_SCTP_INPUT,
4648					    __LINE__);
4649				}
4650				stcb->asoc.overall_error_count = 0;
4651				sctp_handle_asconf(m, *offset,
4652				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
4653				asconf_cnt++;
4654			}
4655			break;
4656		case SCTP_ASCONF_ACK:
4657			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4658			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4659				/* Its not ours */
4660				if (locked_tcb) {
4661					SCTP_TCB_UNLOCK(locked_tcb);
4662				}
4663				*offset = length;
4664				return (NULL);
4665			}
4666			if ((stcb) && netp && *netp) {
4667				/* He's alive so give him credit */
4668				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4669					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4670					    stcb->asoc.overall_error_count,
4671					    0,
4672					    SCTP_FROM_SCTP_INPUT,
4673					    __LINE__);
4674				}
4675				stcb->asoc.overall_error_count = 0;
4676				sctp_handle_asconf_ack(m, *offset,
4677				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
4678				if (abort_no_unlock)
4679					return (NULL);
4680			}
4681			break;
4682		case SCTP_FORWARD_CUM_TSN:
4683			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4684			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4685				/* Its not ours */
4686				if (locked_tcb) {
4687					SCTP_TCB_UNLOCK(locked_tcb);
4688				}
4689				*offset = length;
4690				return (NULL);
4691			}
4692			/* He's alive so give him credit */
4693			if (stcb) {
4694				int abort_flag = 0;
4695
4696				stcb->asoc.overall_error_count = 0;
4697				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4698					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4699					    stcb->asoc.overall_error_count,
4700					    0,
4701					    SCTP_FROM_SCTP_INPUT,
4702					    __LINE__);
4703				}
4704				*fwd_tsn_seen = 1;
4705				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4706					/* We are not interested anymore */
4707#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4708					so = SCTP_INP_SO(inp);
4709					atomic_add_int(&stcb->asoc.refcnt, 1);
4710					SCTP_TCB_UNLOCK(stcb);
4711					SCTP_SOCKET_LOCK(so, 1);
4712					SCTP_TCB_LOCK(stcb);
4713					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4714#endif
4715					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4716#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4717					SCTP_SOCKET_UNLOCK(so, 1);
4718#endif
4719					*offset = length;
4720					return (NULL);
4721				}
4722				sctp_handle_forward_tsn(stcb,
4723				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
4724				if (abort_flag) {
4725					*offset = length;
4726					return (NULL);
4727				} else {
4728					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4729						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4730						    stcb->asoc.overall_error_count,
4731						    0,
4732						    SCTP_FROM_SCTP_INPUT,
4733						    __LINE__);
4734					}
4735					stcb->asoc.overall_error_count = 0;
4736				}
4737
4738			}
4739			break;
4740		case SCTP_STREAM_RESET:
4741			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4742			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4743				/* Its not ours */
4744				if (locked_tcb) {
4745					SCTP_TCB_UNLOCK(locked_tcb);
4746				}
4747				*offset = length;
4748				return (NULL);
4749			}
4750			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4751				/* We are not interested anymore */
4752#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4753				so = SCTP_INP_SO(inp);
4754				atomic_add_int(&stcb->asoc.refcnt, 1);
4755				SCTP_TCB_UNLOCK(stcb);
4756				SCTP_SOCKET_LOCK(so, 1);
4757				SCTP_TCB_LOCK(stcb);
4758				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4759#endif
4760				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
4761#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4762				SCTP_SOCKET_UNLOCK(so, 1);
4763#endif
4764				*offset = length;
4765				return (NULL);
4766			}
4767			if (stcb->asoc.peer_supports_strreset == 0) {
4768				/*
4769				 * hmm, peer should have announced this, but
4770				 * we will turn it on since he is sending us
4771				 * a stream reset.
4772				 */
4773				stcb->asoc.peer_supports_strreset = 1;
4774			}
4775			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
4776				/* stop processing */
4777				*offset = length;
4778				return (NULL);
4779			}
4780			break;
4781		case SCTP_PACKET_DROPPED:
4782			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4783			/* re-get it all please */
4784			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4785				/* Its not ours */
4786				if (locked_tcb) {
4787					SCTP_TCB_UNLOCK(locked_tcb);
4788				}
4789				*offset = length;
4790				return (NULL);
4791			}
4792			if (ch && (stcb) && netp && (*netp)) {
4793				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4794				    stcb, *netp,
4795				    min(chk_length, (sizeof(chunk_buf) - 4)));
4796
4797			}
4798			break;
4799
4800		case SCTP_AUTHENTICATION:
4801			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4802			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
4803				goto unknown_chunk;
4804
4805			if (stcb == NULL) {
4806				/* save the first AUTH for later processing */
4807				if (auth_skipped == 0) {
4808					auth_offset = *offset;
4809					auth_len = chk_length;
4810					auth_skipped = 1;
4811				}
4812				/* skip this chunk (temporarily) */
4813				goto next_chunk;
4814			}
4815			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4816			    (chk_length > (sizeof(struct sctp_auth_chunk) +
4817			    SCTP_AUTH_DIGEST_LEN_MAX))) {
4818				/* Its not ours */
4819				if (locked_tcb) {
4820					SCTP_TCB_UNLOCK(locked_tcb);
4821				}
4822				*offset = length;
4823				return (NULL);
4824			}
4825			if (got_auth == 1) {
4826				/* skip this chunk... it's already auth'd */
4827				goto next_chunk;
4828			}
4829			got_auth = 1;
4830			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4831			    m, *offset)) {
4832				/* auth HMAC failed so dump the packet */
4833				*offset = length;
4834				return (stcb);
4835			} else {
4836				/* remaining chunks are HMAC checked */
4837				stcb->asoc.authenticated = 1;
4838			}
4839			break;
4840
4841		default:
4842	unknown_chunk:
4843			/* it's an unknown chunk! */
4844			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4845				struct mbuf *mm;
4846				struct sctp_paramhdr *phd;
4847
4848				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4849				    0, M_DONTWAIT, 1, MT_DATA);
4850				if (mm) {
4851					phd = mtod(mm, struct sctp_paramhdr *);
4852					/*
4853					 * We cheat and use param type since
4854					 * we did not bother to define a
4855					 * error cause struct. They are the
4856					 * same basic format with different
4857					 * names.
4858					 */
4859					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4860					phd->param_length = htons(chk_length + sizeof(*phd));
4861					SCTP_BUF_LEN(mm) = sizeof(*phd);
4862					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4863					    M_DONTWAIT);
4864					if (SCTP_BUF_NEXT(mm)) {
4865#ifdef SCTP_MBUF_LOGGING
4866						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
4867							struct mbuf *mat;
4868
4869							mat = SCTP_BUF_NEXT(mm);
4870							while (mat) {
4871								if (SCTP_BUF_IS_EXTENDED(mat)) {
4872									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
4873								}
4874								mat = SCTP_BUF_NEXT(mat);
4875							}
4876						}
4877#endif
4878						sctp_queue_op_err(stcb, mm);
4879					} else {
4880						sctp_m_freem(mm);
4881					}
4882				}
4883			}
4884			if ((ch->chunk_type & 0x80) == 0) {
4885				/* discard this packet */
4886				*offset = length;
4887				return (stcb);
4888			}	/* else skip this bad chunk and continue... */
4889			break;
4890		}		/* switch (ch->chunk_type) */
4891
4892
4893next_chunk:
4894		/* get the next chunk */
4895		*offset += SCTP_SIZE32(chk_length);
4896		if (*offset >= length) {
4897			/* no more data left in the mbuf chain */
4898			break;
4899		}
4900		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4901		    sizeof(struct sctp_chunkhdr), chunk_buf);
4902		if (ch == NULL) {
4903			if (locked_tcb) {
4904				SCTP_TCB_UNLOCK(locked_tcb);
4905			}
4906			*offset = length;
4907			return (NULL);
4908		}
4909	}			/* while */
4910
4911	if (asconf_cnt > 0 && stcb != NULL) {
4912		sctp_send_asconf_ack(stcb);
4913	}
4914	return (stcb);
4915}
4916
4917
4918/*
4919 * Process the ECN bits we have something set so we must look to see if it is
4920 * ECN(0) or ECN(1) or CE
4921 */
4922static void
4923sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4924    uint8_t ecn_bits)
4925{
4926	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4927		;
4928	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4929		/*
4930		 * we only add to the nonce sum for ECT1, ECT0 does not
4931		 * change the NS bit (that we have yet to find a way to send
4932		 * it yet).
4933		 */
4934
4935		/* ECN Nonce stuff */
4936		stcb->asoc.receiver_nonce_sum++;
4937		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
4938
4939		/*
4940		 * Drag up the last_echo point if cumack is larger since we
4941		 * don't want the point falling way behind by more than
4942		 * 2^^31 and then having it be incorrect.
4943		 */
4944		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4945		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4946			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4947		}
4948	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4949		/*
4950		 * Drag up the last_echo point if cumack is larger since we
4951		 * don't want the point falling way behind by more than
4952		 * 2^^31 and then having it be incorrect.
4953		 */
4954		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4955		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4956			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4957		}
4958	}
4959}
4960
4961static void
4962sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4963    uint32_t high_tsn, uint8_t ecn_bits)
4964{
4965	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4966		/*
4967		 * we possibly must notify the sender that a congestion
4968		 * window reduction is in order. We do this by adding a ECNE
4969		 * chunk to the output chunk queue. The incoming CWR will
4970		 * remove this chunk.
4971		 */
4972		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4973		    MAX_TSN)) {
4974			/* Yep, we need to add a ECNE */
4975			sctp_send_ecn_echo(stcb, net, high_tsn);
4976			stcb->asoc.last_echo_tsn = high_tsn;
4977		}
4978	}
4979}
4980
4981#ifdef INVARIANTS
4982static void
4983sctp_validate_no_locks(struct sctp_inpcb *inp)
4984{
4985	struct sctp_tcb *stcb;
4986
4987	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
4988		if (mtx_owned(&stcb->tcb_mtx)) {
4989			panic("Own lock on stcb at return from input");
4990		}
4991	}
4992}
4993
4994#endif
4995
4996/*
4997 * common input chunk processing (v4 and v6)
4998 */
4999void
5000sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
5001    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
5002    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
5003    uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
5004{
5005	/*
5006	 * Control chunk processing
5007	 */
5008	uint32_t high_tsn;
5009	int fwd_tsn_seen = 0, data_processed = 0;
5010	struct mbuf *m = *mm;
5011	int abort_flag = 0;
5012	int un_sent;
5013
5014	SCTP_STAT_INCR(sctps_recvdatagrams);
5015#ifdef SCTP_AUDITING_ENABLED
5016	sctp_audit_log(0xE0, 1);
5017	sctp_auditing(0, inp, stcb, net);
5018#endif
5019
5020	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5021	    m, iphlen, offset, length, stcb);
5022	if (stcb) {
5023		/* always clear this before beginning a packet */
5024		stcb->asoc.authenticated = 0;
5025		stcb->asoc.seen_a_sack_this_pkt = 0;
5026		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5027		    stcb, stcb->asoc.state);
5028
5029		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5030		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5031			/*-
5032			 * If we hit here, we had a ref count
5033			 * up when the assoc was aborted and the
5034			 * timer is clearing out the assoc, we should
5035			 * NOT respond to any packet.. its OOTB.
5036			 */
5037			SCTP_TCB_UNLOCK(stcb);
5038			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5039			    vrf_id, port);
5040			goto out_now;
5041		}
5042	}
5043	if (IS_SCTP_CONTROL(ch)) {
5044		/* process the control portion of the SCTP packet */
5045		/* sa_ignore NO_NULL_CHK */
5046		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
5047		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
5048		if (stcb) {
5049			/*
5050			 * This covers us if the cookie-echo was there and
5051			 * it changes our INP.
5052			 */
5053			inp = stcb->sctp_ep;
5054			if ((net) && (port)) {
5055				if (net->port == 0) {
5056					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5057				}
5058				net->port = port;
5059			}
5060		}
5061	} else {
5062		/*
5063		 * no control chunks, so pre-process DATA chunks (these
5064		 * checks are taken care of by control processing)
5065		 */
5066
5067		/*
5068		 * if DATA only packet, and auth is required, then punt...
5069		 * can't have authenticated without any AUTH (control)
5070		 * chunks
5071		 */
5072		if ((stcb != NULL) &&
5073		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5074		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5075			/* "silently" ignore */
5076			SCTP_STAT_INCR(sctps_recvauthmissing);
5077			SCTP_TCB_UNLOCK(stcb);
5078			goto out_now;
5079		}
5080		if (stcb == NULL) {
5081			/* out of the blue DATA chunk */
5082			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5083			    vrf_id, port);
5084			goto out_now;
5085		}
5086		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5087			/* v_tag mismatch! */
5088			SCTP_STAT_INCR(sctps_badvtag);
5089			SCTP_TCB_UNLOCK(stcb);
5090			goto out_now;
5091		}
5092	}
5093
5094	if (stcb == NULL) {
5095		/*
5096		 * no valid TCB for this packet, or we found it's a bad
5097		 * packet while processing control, or we're done with this
5098		 * packet (done or skip rest of data), so we drop it...
5099		 */
5100		goto out_now;
5101	}
5102	/*
5103	 * DATA chunk processing
5104	 */
5105	/* plow through the data chunks while length > offset */
5106
5107	/*
5108	 * Rest should be DATA only.  Check authentication state if AUTH for
5109	 * DATA is required.
5110	 */
5111	if ((length > offset) &&
5112	    (stcb != NULL) &&
5113	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5114	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5115	    !stcb->asoc.authenticated) {
5116		/* "silently" ignore */
5117		SCTP_STAT_INCR(sctps_recvauthmissing);
5118		SCTPDBG(SCTP_DEBUG_AUTH1,
5119		    "Data chunk requires AUTH, skipped\n");
5120		goto trigger_send;
5121	}
5122	if (length > offset) {
5123		int retval;
5124
5125		/*
5126		 * First check to make sure our state is correct. We would
5127		 * not get here unless we really did have a tag, so we don't
5128		 * abort if this happens, just dump the chunk silently.
5129		 */
5130		switch (SCTP_GET_STATE(&stcb->asoc)) {
5131		case SCTP_STATE_COOKIE_ECHOED:
5132			/*
5133			 * we consider data with valid tags in this state
5134			 * shows us the cookie-ack was lost. Imply it was
5135			 * there.
5136			 */
5137			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5138				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5139				    stcb->asoc.overall_error_count,
5140				    0,
5141				    SCTP_FROM_SCTP_INPUT,
5142				    __LINE__);
5143			}
5144			stcb->asoc.overall_error_count = 0;
5145			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5146			break;
5147		case SCTP_STATE_COOKIE_WAIT:
5148			/*
5149			 * We consider OOTB any data sent during asoc setup.
5150			 */
5151			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5152			    vrf_id, port);
5153			SCTP_TCB_UNLOCK(stcb);
5154			goto out_now;
5155			/* sa_ignore NOTREACHED */
5156			break;
5157		case SCTP_STATE_EMPTY:	/* should not happen */
5158		case SCTP_STATE_INUSE:	/* should not happen */
5159		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5160		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5161		default:
5162			SCTP_TCB_UNLOCK(stcb);
5163			goto out_now;
5164			/* sa_ignore NOTREACHED */
5165			break;
5166		case SCTP_STATE_OPEN:
5167		case SCTP_STATE_SHUTDOWN_SENT:
5168			break;
5169		}
5170		/* take care of ECN, part 1. */
5171		if (stcb->asoc.ecn_allowed &&
5172		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5173			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
5174		}
5175		/* plow through the data chunks while length > offset */
5176		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
5177		    inp, stcb, net, &high_tsn);
5178		if (retval == 2) {
5179			/*
5180			 * The association aborted, NO UNLOCK needed since
5181			 * the association is destroyed.
5182			 */
5183			goto out_now;
5184		}
5185		data_processed = 1;
5186		if (retval == 0) {
5187			/* take care of ecn part 2. */
5188			if (stcb->asoc.ecn_allowed &&
5189			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5190				sctp_process_ecn_marked_b(stcb, net, high_tsn,
5191				    ecn_bits);
5192			}
5193		}
5194		/*
5195		 * Anything important needs to have been m_copy'ed in
5196		 * process_data
5197		 */
5198	}
5199	if ((data_processed == 0) && (fwd_tsn_seen)) {
5200		int was_a_gap = 0;
5201
5202		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
5203		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
5204			/* there was a gap before this data was processed */
5205			was_a_gap = 1;
5206		}
5207		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
5208		if (abort_flag) {
5209			/* Again, we aborted so NO UNLOCK needed */
5210			goto out_now;
5211		}
5212	}
5213	/* trigger send of any chunks in queue... */
5214trigger_send:
5215#ifdef SCTP_AUDITING_ENABLED
5216	sctp_audit_log(0xE0, 2);
5217	sctp_auditing(1, inp, stcb, net);
5218#endif
5219	SCTPDBG(SCTP_DEBUG_INPUT1,
5220	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5221	    stcb->asoc.peers_rwnd,
5222	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5223	    stcb->asoc.total_flight);
5224	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5225
5226	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
5227	    ((un_sent) &&
5228	    (stcb->asoc.peers_rwnd > 0 ||
5229	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5230		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5231		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5232		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5233	}
5234#ifdef SCTP_AUDITING_ENABLED
5235	sctp_audit_log(0xE0, 3);
5236	sctp_auditing(2, inp, stcb, net);
5237#endif
5238	SCTP_TCB_UNLOCK(stcb);
5239out_now:
5240#ifdef INVARIANTS
5241	sctp_validate_no_locks(inp);
5242#endif
5243	return;
5244}
5245
5246
5247void
5248sctp_input_with_port(i_pak, off, port)
5249	struct mbuf *i_pak;
5250	int off;
5251	uint16_t port;
5252{
5253#ifdef SCTP_MBUF_LOGGING
5254	struct mbuf *mat;
5255
5256#endif
5257	struct mbuf *m;
5258	int iphlen;
5259	uint32_t vrf_id = 0;
5260	uint8_t ecn_bits;
5261	struct ip *ip;
5262	struct sctphdr *sh;
5263	struct sctp_inpcb *inp = NULL;
5264
5265	uint32_t check, calc_check;
5266	struct sctp_nets *net;
5267	struct sctp_tcb *stcb = NULL;
5268	struct sctp_chunkhdr *ch;
5269	int refcount_up = 0;
5270	int length, mlen, offset;
5271
5272
5273	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
5274		SCTP_RELEASE_PKT(i_pak);
5275		return;
5276	}
5277	mlen = SCTP_HEADER_LEN(i_pak);
5278	iphlen = off;
5279	m = SCTP_HEADER_TO_CHAIN(i_pak);
5280
5281	net = NULL;
5282	SCTP_STAT_INCR(sctps_recvpackets);
5283	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
5284
5285
5286#ifdef SCTP_MBUF_LOGGING
5287	/* Log in any input mbufs */
5288	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5289		mat = m;
5290		while (mat) {
5291			if (SCTP_BUF_IS_EXTENDED(mat)) {
5292				sctp_log_mb(mat, SCTP_MBUF_INPUT);
5293			}
5294			mat = SCTP_BUF_NEXT(mat);
5295		}
5296	}
5297#endif
5298#ifdef  SCTP_PACKET_LOGGING
5299	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
5300		sctp_packet_log(m, mlen);
5301#endif
5302	/*
5303	 * Must take out the iphlen, since mlen expects this (only effect lb
5304	 * case)
5305	 */
5306	mlen -= iphlen;
5307
5308	/*
5309	 * Get IP, SCTP, and first chunk header together in first mbuf.
5310	 */
5311	ip = mtod(m, struct ip *);
5312	offset = iphlen + sizeof(*sh) + sizeof(*ch);
5313	if (SCTP_BUF_LEN(m) < offset) {
5314		if ((m = m_pullup(m, offset)) == 0) {
5315			SCTP_STAT_INCR(sctps_hdrops);
5316			return;
5317		}
5318		ip = mtod(m, struct ip *);
5319	}
5320	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
5321	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
5322	SCTPDBG(SCTP_DEBUG_INPUT1,
5323	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);
5324
5325	/* SCTP does not allow broadcasts or multicasts */
5326	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
5327		goto bad;
5328	}
5329	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
5330		/*
5331		 * We only look at broadcast if its a front state, All
5332		 * others we will not have a tcb for anyway.
5333		 */
5334		goto bad;
5335	}
5336	/* validate SCTP checksum */
5337	check = sh->checksum;	/* save incoming checksum */
5338	if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) &&
5339	    ((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
5340	    (SCTP_IS_IT_LOOPBACK(m)))
5341	    ) {
5342		goto sctp_skip_csum_4;
5343	}
5344	sh->checksum = 0;	/* prepare for calc */
5345	calc_check = sctp_calculate_sum(m, &mlen, iphlen);
5346	if (calc_check != check) {
5347		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5348		    calc_check, check, m, mlen, iphlen);
5349
5350		stcb = sctp_findassociation_addr(m, iphlen,
5351		    offset - sizeof(*ch),
5352		    sh, ch, &inp, &net,
5353		    vrf_id);
5354		if ((net) && (port)) {
5355			if (net->port == 0) {
5356				sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5357			}
5358			net->port = port;
5359		}
5360		if ((inp) && (stcb)) {
5361			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
5362			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5363		} else if ((inp != NULL) && (stcb == NULL)) {
5364			refcount_up = 1;
5365		}
5366		SCTP_STAT_INCR(sctps_badsum);
5367		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5368		goto bad;
5369	}
5370	sh->checksum = calc_check;
5371sctp_skip_csum_4:
5372	/* destination port of 0 is illegal, based on RFC2960. */
5373	if (sh->dest_port == 0) {
5374		SCTP_STAT_INCR(sctps_hdrops);
5375		goto bad;
5376	}
5377	/* validate mbuf chain length with IP payload length */
5378	if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
5379		SCTP_STAT_INCR(sctps_hdrops);
5380		goto bad;
5381	}
5382	/*
5383	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
5384	 * IP/SCTP/first chunk header...
5385	 */
5386	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
5387	    sh, ch, &inp, &net, vrf_id);
5388	if ((net) && (port)) {
5389		if (net->port == 0) {
5390			sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5391		}
5392		net->port = port;
5393	}
5394	/* inp's ref-count increased && stcb locked */
5395	if (inp == NULL) {
5396		struct sctp_init_chunk *init_chk, chunk_buf;
5397
5398		SCTP_STAT_INCR(sctps_noport);
5399#ifdef ICMP_BANDLIM
5400		/*
5401		 * we use the bandwidth limiting to protect against sending
5402		 * too many ABORTS all at once. In this case these count the
5403		 * same as an ICMP message.
5404		 */
5405		if (badport_bandlim(0) < 0)
5406			goto bad;
5407#endif				/* ICMP_BANDLIM */
5408		SCTPDBG(SCTP_DEBUG_INPUT1,
5409		    "Sending a ABORT from packet entry!\n");
5410		if (ch->chunk_type == SCTP_INITIATION) {
5411			/*
5412			 * we do a trick here to get the INIT tag, dig in
5413			 * and get the tag from the INIT and put it in the
5414			 * common header.
5415			 */
5416			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
5417			    iphlen + sizeof(*sh), sizeof(*init_chk),
5418			    (uint8_t *) & chunk_buf);
5419			if (init_chk != NULL)
5420				sh->v_tag = init_chk->init.initiate_tag;
5421		}
5422		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5423			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
5424			goto bad;
5425		}
5426		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5427			goto bad;
5428		}
5429		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
5430			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
5431		goto bad;
5432	} else if (stcb == NULL) {
5433		refcount_up = 1;
5434	}
5435#ifdef IPSEC
5436	/*
5437	 * I very much doubt any of the IPSEC stuff will work but I have no
5438	 * idea, so I will leave it in place.
5439	 */
5440	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
5441		MODULE_GLOBAL(MOD_IPSEC, ipsec4stat).in_polvio++;
5442		SCTP_STAT_INCR(sctps_hdrops);
5443		goto bad;
5444	}
5445#endif				/* IPSEC */
5446
5447	/*
5448	 * common chunk processing
5449	 */
5450	length = ip->ip_len + iphlen;
5451	offset -= sizeof(struct sctp_chunkhdr);
5452
5453	ecn_bits = ip->ip_tos;
5454
5455	/* sa_ignore NO_NULL_CHK */
5456	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
5457	    inp, stcb, net, ecn_bits, vrf_id, port);
5458	/* inp's ref-count reduced && stcb unlocked */
5459	if (m) {
5460		sctp_m_freem(m);
5461	}
5462	if ((inp) && (refcount_up)) {
5463		/* reduce ref-count */
5464		SCTP_INP_DECR_REF(inp);
5465	}
5466	return;
5467bad:
5468	if (stcb) {
5469		SCTP_TCB_UNLOCK(stcb);
5470	}
5471	if ((inp) && (refcount_up)) {
5472		/* reduce ref-count */
5473		SCTP_INP_DECR_REF(inp);
5474	}
5475	if (m) {
5476		sctp_m_freem(m);
5477	}
5478	return;
5479}
5480void
5481sctp_input(i_pak, off)
5482	struct mbuf *i_pak;
5483	int off;
5484{
5485	sctp_input_with_port(i_pak, off, 0);
5486}
5487