sctp_input.c revision 216822
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 216822 2010-12-30 16:56:20Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_crc32.h>
50#include <netinet/udp.h>
51
52
53
54static void
55sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
56{
57	struct sctp_nets *net;
58
59	/*
60	 * This now not only stops all cookie timers it also stops any INIT
61	 * timers as well. This will make sure that the timers are stopped
62	 * in all collision cases.
63	 */
64	SCTP_TCB_LOCK_ASSERT(stcb);
65	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
66		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
67			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
68			    stcb->sctp_ep,
69			    stcb,
70			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
71		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
72			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
73			    stcb->sctp_ep,
74			    stcb,
75			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
76		}
77	}
78}
79
/*
 * INIT handler.
 *
 * Validates an inbound INIT chunk and, when acceptable, replies with an
 * INIT-ACK carrying a state cookie.  On any validation failure the
 * association attempt is aborted via sctp_abort_association(); when an
 * existing stcb was passed in, *abort_no_unlock is set so the caller knows
 * the TCB lock has already been released by the abort path.
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	/*
	 * No existing association: hold the INP read lock for the duration
	 * of the handler (dropped at "outnow").  If the socket is already
	 * gone there is nothing to associate with, so discard silently.
	 */
	if (stcb == NULL) {
		SCTP_INP_RLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			goto outnow;
		}
	}
	op_err = NULL;
	init = &cp->init;
	/* First are we accepting? */
	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init: Abort, so_qlimit:%d\n",
		    inp->sctp_socket->so_qlimit);
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* Reject a chunk too short to contain the fixed INIT fields. */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/*
	 * Walk the variable-length parameters (AUTH related) bounded by the
	 * end of this chunk; a malformed AUTH parameter aborts the setup.
	 */
	init_limit = offset + ntohs(cp->ch.chunk_length);
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    init_limit)) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* send an INIT-ACK w/cookie */
	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
outnow:
	/* Drop the INP read lock taken above only in the stcb == NULL case. */
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
187
188/*
189 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
190 */
191
192int
193sctp_is_there_unsent_data(struct sctp_tcb *stcb)
194{
195	int unsent_data = 0;
196	struct sctp_stream_queue_pending *sp, *nsp;
197	struct sctp_stream_out *strq;
198	struct sctp_association *asoc;
199
200	/*
201	 * This function returns the number of streams that have true unsent
202	 * data on them. Note that as it looks through it will clean up any
203	 * places that have old data that has been sent but left at top of
204	 * stream queue.
205	 */
206	asoc = &stcb->asoc;
207	SCTP_TCB_SEND_LOCK(stcb);
208	TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
209		/* sa_ignore FREED_MEMORY */
210		TAILQ_FOREACH_SAFE(sp, &strq->outqueue, next, nsp) {
211			if ((sp->msg_is_complete) &&
212			    (sp->length == 0) &&
213			    (sp->sender_all_done)) {
214				/*
215				 * We are doing differed cleanup. Last time
216				 * through when we took all the data the
217				 * sender_all_done was not set.
218				 */
219				if (sp->put_last_out == 0) {
220					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
221					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
222					    sp->sender_all_done,
223					    sp->length,
224					    sp->msg_is_complete,
225					    sp->put_last_out);
226				}
227				atomic_subtract_int(&asoc->stream_queue_cnt, 1);
228				TAILQ_REMOVE(&strq->outqueue, sp, next);
229				if (sp->net) {
230					sctp_free_remote_addr(sp->net);
231					sp->net = NULL;
232				}
233				if (sp->data) {
234					sctp_m_freem(sp->data);
235					sp->data = NULL;
236				}
237				sctp_free_a_strmoq(stcb, sp);
238			} else {
239				unsent_data++;
240				break;
241			}
242		}
243	}
244	SCTP_TCB_SEND_UNLOCK(stcb);
245	return (unsent_data);
246}
247
/*
 * Common INIT / INIT-ACK parameter consumption: records the peer's
 * verification tag, a_rwnd and stream counts into the association, trims
 * our outbound streams down to what the peer will accept, bootstraps the
 * inbound TSN tracking state from the peer's initial TSN, and
 * (re)allocates the inbound stream array.
 *
 * Returns 0 on success, -1 if the inbound stream array could not be
 * allocated.  (The "net" parameter is not referenced in this body —
 * NOTE(review): presumably kept for signature symmetry; confirm.)
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	/*
	 * The peer accepts fewer inbound streams than we pre-opened:
	 * abandon (with notifications) everything queued on the streams
	 * that no longer exist.  Send lock guards the stream queues.
	 */
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First: drop already-chunked data destined for dead streams. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
					    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk);
				/* sa_ignore FREED_MEMORY */
			}
		}
		/* Second: drain the per-stream pending queues of dead streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
	asoc->last_echo_tsn = asoc->asconf_seq_in;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl, *nctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count is capped at what we can actually track. */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		/* 0xffff: "nothing delivered yet" sentinel for stream seq */
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
392
/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 *
 * Checks the INIT-ACK for unrecognized parameters, folds the peer's
 * parameters into the association via sctp_process_init(), loads the
 * peer's addresses, negotiates the HMAC, and finally extracts the state
 * cookie and queues a COOKIE-ECHO.  On abort paths *abort_no_unlock is set
 * to tell the caller the TCB lock has already been dealt with.
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	/* Parameter walk is bounded by the end of this chunk. */
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	/* Forward any unrecognized-parameter report to the peer. */
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* A valid INIT-ACK proves the peer path works: clear error counts. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}
517
/*
 * HEARTBEAT-ACK handler.
 *
 * Recovers the destination address embedded in the echoed heartbeat info,
 * confirms the destination (if the random values match), clears its error
 * count, possibly promotes it back to primary, takes it out of the CMT-PF
 * state, and updates the path RTO from the echoed timestamps.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	/* sin and sin6 both alias the zeroed storage below. */
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;

	memset(&store, 0, sizeof(store));
	/*
	 * Rebuild a sockaddr from the echoed hb_info; the HB info carries
	 * no port, so the association's remote port is used.
	 */
	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
		sin->sin_family = cp->heartbeat.hb_info.addr_family;
		sin->sin_len = cp->heartbeat.hb_info.addr_len;
		sin->sin_port = stcb->rport;
		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin->sin_addr));
	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
		sin6->sin6_port = stcb->rport;
		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown family or mismatched length: discard */
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	/* (r_net is non-NULL here; the extra test below is redundant.) */
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If the its a HB and it's random value is correct when can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficent if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	/* The echoed send time; used below for the RTO sample. */
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
		/* now was it the primary? if so restore */
		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
		}
	}
	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
	 * set the destination to active state and set the cwnd to one or
	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
	 * timer is running, for the destination, stop the timer because a
	 * PF-heartbeat was received.
	 *
	 * NOTE(review): this section tests and updates "net" (the caller's
	 * argument) rather than r_net (the net the HB-ACK refers to) —
	 * confirm this is intended.
	 */
	if ((stcb->asoc.sctp_cmt_on_off > 0) &&
	    (stcb->asoc.sctp_cmt_pf > 0) &&
	    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		}
		net->dest_state &= ~SCTP_ADDR_PF;
		net->cwnd = net->mtu * stcb->asoc.sctp_cmt_pf;
		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
		    net, net->cwnd);
	}
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				    stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}
648
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 *
	 * NAT "colliding state" recovery: in COOKIE-WAIT or COOKIE-ECHOED
	 * we restart association setup under a fresh verification tag
	 * instead of aborting.  Changing my_vtag changes the assoc's hash
	 * bucket, so the stcb must be removed and re-inserted.
	 * NOTE(review): the hash-list manipulation here appears to rely on
	 * the caller holding the appropriate pcbinfo lock — confirm.
	 */
	struct sctpasochead *head;

	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * treat like a case where the cookie expired i.e.: - dump
		 * current cookie. - generate a new vtag. - resend init.
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		/* drop back from COOKIE-ECHOED to COOKIE-WAIT */
		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	/* any other state: let the normal abort processing happen */
	return (0);
}
694
695static int
696sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
697    struct sctp_nets *net)
698{
699	/*
700	 * return 0 means we want you to proceed with the abort non-zero
701	 * means no abort processing
702	 */
703	if (stcb->asoc.peer_supports_auth == 0) {
704		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
705		return (0);
706	}
707	sctp_asconf_send_nat_state_update(stcb, net);
708	return (1);
709}
710
711
/*
 * ABORT handler: tears the association down, unless the ABORT carries one
 * of the two magic NAT error causes, in which case NAT state recovery may
 * suppress the teardown.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(cp->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_abort_chunk *cpnext;
		struct sctp_missing_nat_state *natc;
		uint16_t cause;

		/*
		 * cpnext++ steps one sizeof(struct sctp_abort_chunk) past
		 * the start, i.e. to the first error cause — assumes the
		 * abort chunk struct is exactly the chunk header.
		 */
		cpnext = cp;
		cpnext++;
		natc = (struct sctp_missing_nat_state *)cpnext;
		cause = ntohs(natc->cause);
		if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    cp->ch.chunk_flags);
			/* non-zero: NAT recovery took over, do not tear down */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return;
			}
		} else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    cp->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
#if defined(SCTP_PANIC_ON_ABORT)
	printf("stcb:%p state:%d rport:%d net:%p\n",
	    stcb, stcb->asoc.state, stcb->rport, net);
	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		panic("Received an ABORT");
	} else {
		printf("No panic its in state %x closed\n", stcb->asoc.state);
	}
#endif
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
	/*
	 * Lock-order dance: the socket lock must be taken before the TCB
	 * lock, so drop/reacquire the TCB lock with a refcount hold to
	 * keep the stcb from being freed underneath us.
	 */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
792
/*
 * SHUTDOWN handler: processes the peer's cumulative ack, finishes any
 * partial-delivery record, moves to SHUTDOWN-RECEIVED, and — once there is
 * no unsent/unacked data left — replies with SHUTDOWN-ACK and moves to
 * SHUTDOWN-ACK-SENT.  Sets *abort_flag if sctp_update_acked() aborted.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* A SHUTDOWN before the association is up is ignored. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		/* Process the SHUTDOWN's cumulative TSN ack. */
		sctp_update_acked(stcb, cp, net, abort_flag);
		if (*abort_flag) {
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
		/*
		 * Lock-order dance: take the socket lock before re-taking
		 * the TCB lock, holding a refcount so the stcb survives.
		 */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}
898
899static void
900sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
901    struct sctp_tcb *stcb,
902    struct sctp_nets *net)
903{
904	struct sctp_association *asoc;
905
906#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
907	struct socket *so;
908
909	so = SCTP_INP_SO(stcb->sctp_ep);
910#endif
911	SCTPDBG(SCTP_DEBUG_INPUT2,
912	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
913	if (stcb == NULL)
914		return;
915
916	asoc = &stcb->asoc;
917	/* process according to association state */
918	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
919	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
920		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
921		sctp_send_shutdown_complete(stcb, net, 1);
922		SCTP_TCB_UNLOCK(stcb);
923		return;
924	}
925	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
926	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
927		/* unexpected SHUTDOWN-ACK... so ignore... */
928		SCTP_TCB_UNLOCK(stcb);
929		return;
930	}
931	if (asoc->control_pdapi) {
932		/*
933		 * With a normal shutdown we assume the end of last record.
934		 */
935		SCTP_INP_READ_LOCK(stcb->sctp_ep);
936		asoc->control_pdapi->end_added = 1;
937		asoc->control_pdapi->pdapi_aborted = 1;
938		asoc->control_pdapi = NULL;
939		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
940#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
941		atomic_add_int(&stcb->asoc.refcnt, 1);
942		SCTP_TCB_UNLOCK(stcb);
943		SCTP_SOCKET_LOCK(so, 1);
944		SCTP_TCB_LOCK(stcb);
945		atomic_subtract_int(&stcb->asoc.refcnt, 1);
946		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
947			/* assoc was freed while we were unlocked */
948			SCTP_SOCKET_UNLOCK(so, 1);
949			return;
950		}
951#endif
952		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
953#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
954		SCTP_SOCKET_UNLOCK(so, 1);
955#endif
956	}
957	/* are the queues empty? */
958	if (!TAILQ_EMPTY(&asoc->send_queue) ||
959	    !TAILQ_EMPTY(&asoc->sent_queue) ||
960	    !TAILQ_EMPTY(&asoc->out_wheel)) {
961		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
962	}
963	/* stop the timer */
964	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
965	/* send SHUTDOWN-COMPLETE */
966	sctp_send_shutdown_complete(stcb, net, 0);
967	/* notify upper layer protocol */
968	if (stcb->sctp_socket) {
969		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
970		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
971		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
972			/* Set the connected flag to disconnected */
973			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
974		}
975	}
976	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
977	/* free the TCB but first save off the ep */
978#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
979	atomic_add_int(&stcb->asoc.refcnt, 1);
980	SCTP_TCB_UNLOCK(stcb);
981	SCTP_SOCKET_LOCK(so, 1);
982	SCTP_TCB_LOCK(stcb);
983	atomic_subtract_int(&stcb->asoc.refcnt, 1);
984#endif
985	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
986	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
987#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
988	SCTP_SOCKET_UNLOCK(so, 1);
989#endif
990}
991
992/*
993 * Skip past the param header and then we will find the chunk that caused the
994 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
995 * our peer must be broken.
996 */
997static void
998sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
999    struct sctp_nets *net)
1000{
1001	struct sctp_chunkhdr *chk;
1002
1003	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1004	switch (chk->chunk_type) {
1005	case SCTP_ASCONF_ACK:
1006	case SCTP_ASCONF:
1007		sctp_asconf_cleanup(stcb, net);
1008		break;
1009	case SCTP_FORWARD_CUM_TSN:
1010		stcb->asoc.peer_supports_prsctp = 0;
1011		break;
1012	default:
1013		SCTPDBG(SCTP_DEBUG_INPUT2,
1014		    "Peer does not support chunk type %d(%x)??\n",
1015		    chk->chunk_type, (uint32_t) chk->chunk_type);
1016		break;
1017	}
1018}
1019
1020/*
1021 * Skip past the param header and then we will find the param that caused the
1022 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1023 * these will turn of specific features.
1024 */
1025static void
1026sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1027{
1028	struct sctp_paramhdr *pbad;
1029
1030	pbad = phdr + 1;
1031	switch (ntohs(pbad->param_type)) {
1032		/* pr-sctp draft */
1033	case SCTP_PRSCTP_SUPPORTED:
1034		stcb->asoc.peer_supports_prsctp = 0;
1035		break;
1036	case SCTP_SUPPORTED_CHUNK_EXT:
1037		break;
1038		/* draft-ietf-tsvwg-addip-sctp */
1039	case SCTP_HAS_NAT_SUPPORT:
1040		stcb->asoc.peer_supports_nat = 0;
1041		break;
1042	case SCTP_ECN_NONCE_SUPPORTED:
1043		stcb->asoc.peer_supports_ecn_nonce = 0;
1044		stcb->asoc.ecn_nonce_allowed = 0;
1045		stcb->asoc.ecn_allowed = 0;
1046		break;
1047	case SCTP_ADD_IP_ADDRESS:
1048	case SCTP_DEL_IP_ADDRESS:
1049	case SCTP_SET_PRIM_ADDR:
1050		stcb->asoc.peer_supports_asconf = 0;
1051		break;
1052	case SCTP_SUCCESS_REPORT:
1053	case SCTP_ERROR_CAUSE_IND:
1054		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1055		SCTPDBG(SCTP_DEBUG_INPUT2,
1056		    "Turning off ASCONF to this strange peer\n");
1057		stcb->asoc.peer_supports_asconf = 0;
1058		break;
1059	default:
1060		SCTPDBG(SCTP_DEBUG_INPUT2,
1061		    "Peer does not support param type %d(%x)??\n",
1062		    pbad->param_type, (uint32_t) pbad->param_type);
1063		break;
1064	}
1065}
1066
1067static int
1068sctp_handle_error(struct sctp_chunkhdr *ch,
1069    struct sctp_tcb *stcb, struct sctp_nets *net)
1070{
1071	int chklen;
1072	struct sctp_paramhdr *phdr;
1073	uint16_t error_type;
1074	uint16_t error_len;
1075	struct sctp_association *asoc;
1076	int adjust;
1077
1078#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1079	struct socket *so;
1080
1081#endif
1082
1083	/* parse through all of the errors and process */
1084	asoc = &stcb->asoc;
1085	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1086	    sizeof(struct sctp_chunkhdr));
1087	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
1088	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1089		/* Process an Error Cause */
1090		error_type = ntohs(phdr->param_type);
1091		error_len = ntohs(phdr->param_length);
1092		if ((error_len > chklen) || (error_len == 0)) {
1093			/* invalid param length for this param */
1094			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1095			    chklen, error_len);
1096			return (0);
1097		}
1098		switch (error_type) {
1099		case SCTP_CAUSE_INVALID_STREAM:
1100		case SCTP_CAUSE_MISSING_PARAM:
1101		case SCTP_CAUSE_INVALID_PARAM:
1102		case SCTP_CAUSE_NO_USER_DATA:
1103			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1104			    error_type);
1105			break;
1106		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1107			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1108			    ch->chunk_flags);
1109			if (sctp_handle_nat_colliding_state(stcb)) {
1110				return (0);
1111			}
1112			break;
1113		case SCTP_CAUSE_NAT_MISSING_STATE:
1114			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1115			    ch->chunk_flags);
1116			if (sctp_handle_nat_missing_state(stcb, net)) {
1117				return (0);
1118			}
1119			break;
1120		case SCTP_CAUSE_STALE_COOKIE:
1121			/*
1122			 * We only act if we have echoed a cookie and are
1123			 * waiting.
1124			 */
1125			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1126				int *p;
1127
1128				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1129				/* Save the time doubled */
1130				asoc->cookie_preserve_req = ntohl(*p) << 1;
1131				asoc->stale_cookie_count++;
1132				if (asoc->stale_cookie_count >
1133				    asoc->max_init_times) {
1134					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
1135					/* now free the asoc */
1136#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1137					so = SCTP_INP_SO(stcb->sctp_ep);
1138					atomic_add_int(&stcb->asoc.refcnt, 1);
1139					SCTP_TCB_UNLOCK(stcb);
1140					SCTP_SOCKET_LOCK(so, 1);
1141					SCTP_TCB_LOCK(stcb);
1142					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1143#endif
1144					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1145					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1146#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1147					SCTP_SOCKET_UNLOCK(so, 1);
1148#endif
1149					return (-1);
1150				}
1151				/* blast back to INIT state */
1152				sctp_toss_old_cookies(stcb, &stcb->asoc);
1153				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1154				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1155				sctp_stop_all_cookie_timers(stcb);
1156				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1157			}
1158			break;
1159		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1160			/*
1161			 * Nothing we can do here, we don't do hostname
1162			 * addresses so if the peer does not like my IPv6
1163			 * (or IPv4 for that matter) it does not matter. If
1164			 * they don't support that type of address, they can
1165			 * NOT possibly get that packet type... i.e. with no
1166			 * IPv6 you can't recieve a IPv6 packet. so we can
1167			 * safely ignore this one. If we ever added support
1168			 * for HOSTNAME Addresses, then we would need to do
1169			 * something here.
1170			 */
1171			break;
1172		case SCTP_CAUSE_UNRECOG_CHUNK:
1173			sctp_process_unrecog_chunk(stcb, phdr, net);
1174			break;
1175		case SCTP_CAUSE_UNRECOG_PARAM:
1176			sctp_process_unrecog_param(stcb, phdr);
1177			break;
1178		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1179			/*
1180			 * We ignore this since the timer will drive out a
1181			 * new cookie anyway and there timer will drive us
1182			 * to send a SHUTDOWN_COMPLETE. We can't send one
1183			 * here since we don't have their tag.
1184			 */
1185			break;
1186		case SCTP_CAUSE_DELETING_LAST_ADDR:
1187		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1188		case SCTP_CAUSE_DELETING_SRC_ADDR:
1189			/*
1190			 * We should NOT get these here, but in a
1191			 * ASCONF-ACK.
1192			 */
1193			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1194			    error_type);
1195			break;
1196		case SCTP_CAUSE_OUT_OF_RESC:
1197			/*
1198			 * And what, pray tell do we do with the fact that
1199			 * the peer is out of resources? Not really sure we
1200			 * could do anything but abort. I suspect this
1201			 * should have came WITH an abort instead of in a
1202			 * OP-ERROR.
1203			 */
1204			break;
1205		default:
1206			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1207			    error_type);
1208			break;
1209		}
1210		adjust = SCTP_SIZE32(error_len);
1211		chklen -= adjust;
1212		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1213	}
1214	return (0);
1215}
1216
1217static int
1218sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1219    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1220    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1221{
1222	struct sctp_init_ack *init_ack;
1223	struct mbuf *op_err;
1224
1225	SCTPDBG(SCTP_DEBUG_INPUT2,
1226	    "sctp_handle_init_ack: handling INIT-ACK\n");
1227
1228	if (stcb == NULL) {
1229		SCTPDBG(SCTP_DEBUG_INPUT2,
1230		    "sctp_handle_init_ack: TCB is null\n");
1231		return (-1);
1232	}
1233	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1234		/* Invalid length */
1235		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1236		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1237		    op_err, 0, net->port);
1238		*abort_no_unlock = 1;
1239		return (-1);
1240	}
1241	init_ack = &cp->init;
1242	/* validate parameters */
1243	if (init_ack->initiate_tag == 0) {
1244		/* protocol error... send an abort */
1245		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1246		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1247		    op_err, 0, net->port);
1248		*abort_no_unlock = 1;
1249		return (-1);
1250	}
1251	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1252		/* protocol error... send an abort */
1253		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1254		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1255		    op_err, 0, net->port);
1256		*abort_no_unlock = 1;
1257		return (-1);
1258	}
1259	if (init_ack->num_inbound_streams == 0) {
1260		/* protocol error... send an abort */
1261		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1262		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1263		    op_err, 0, net->port);
1264		*abort_no_unlock = 1;
1265		return (-1);
1266	}
1267	if (init_ack->num_outbound_streams == 0) {
1268		/* protocol error... send an abort */
1269		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1270		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1271		    op_err, 0, net->port);
1272		*abort_no_unlock = 1;
1273		return (-1);
1274	}
1275	/* process according to association state... */
1276	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1277	case SCTP_STATE_COOKIE_WAIT:
1278		/* this is the expected state for this chunk */
1279		/* process the INIT-ACK parameters */
1280		if (stcb->asoc.primary_destination->dest_state &
1281		    SCTP_ADDR_UNCONFIRMED) {
1282			/*
1283			 * The primary is where we sent the INIT, we can
1284			 * always consider it confirmed when the INIT-ACK is
1285			 * returned. Do this before we load addresses
1286			 * though.
1287			 */
1288			stcb->asoc.primary_destination->dest_state &=
1289			    ~SCTP_ADDR_UNCONFIRMED;
1290			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1291			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1292		}
1293		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1294		    net, abort_no_unlock, vrf_id) < 0) {
1295			/* error in parsing parameters */
1296			return (-1);
1297		}
1298		/* update our state */
1299		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1300		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1301
1302		/* reset the RTO calc */
1303		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1304			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1305			    stcb->asoc.overall_error_count,
1306			    0,
1307			    SCTP_FROM_SCTP_INPUT,
1308			    __LINE__);
1309		}
1310		stcb->asoc.overall_error_count = 0;
1311		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1312		/*
1313		 * collapse the init timer back in case of a exponential
1314		 * backoff
1315		 */
1316		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1317		    stcb, net);
1318		/*
1319		 * the send at the end of the inbound data processing will
1320		 * cause the cookie to be sent
1321		 */
1322		break;
1323	case SCTP_STATE_SHUTDOWN_SENT:
1324		/* incorrect state... discard */
1325		break;
1326	case SCTP_STATE_COOKIE_ECHOED:
1327		/* incorrect state... discard */
1328		break;
1329	case SCTP_STATE_OPEN:
1330		/* incorrect state... discard */
1331		break;
1332	case SCTP_STATE_EMPTY:
1333	case SCTP_STATE_INUSE:
1334	default:
1335		/* incorrect state... discard */
1336		return (-1);
1337		break;
1338	}
1339	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1340	return (0);
1341}
1342
1343static struct sctp_tcb *
1344sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1345    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1346    struct sctp_inpcb *inp, struct sctp_nets **netp,
1347    struct sockaddr *init_src, int *notification,
1348    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1349    uint32_t vrf_id, uint16_t port);
1350
1351
1352/*
1353 * handle a state cookie for an existing association m: input packet mbuf
1354 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1355 * "split" mbuf and the cookie signature does not exist offset: offset into
1356 * mbuf to the cookie-echo chunk
1357 */
1358static struct sctp_tcb *
1359sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1360    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1361    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1362    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1363    uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
1364{
1365	struct sctp_association *asoc;
1366	struct sctp_init_chunk *init_cp, init_buf;
1367	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1368	struct sctp_nets *net;
1369	struct mbuf *op_err;
1370	struct sctp_paramhdr *ph;
1371	int chk_length;
1372	int init_offset, initack_offset, i;
1373	int retval;
1374	int spec_flag = 0;
1375	uint32_t how_indx;
1376
1377	net = *netp;
1378	/* I know that the TCB is non-NULL from the caller */
1379	asoc = &stcb->asoc;
1380	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1381		if (asoc->cookie_how[how_indx] == 0)
1382			break;
1383	}
1384	if (how_indx < sizeof(asoc->cookie_how)) {
1385		asoc->cookie_how[how_indx] = 1;
1386	}
1387	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1388		/* SHUTDOWN came in after sending INIT-ACK */
1389		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1390		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1391		    0, M_DONTWAIT, 1, MT_DATA);
1392		if (op_err == NULL) {
1393			/* FOOBAR */
1394			return (NULL);
1395		}
1396		/* Set the len */
1397		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1398		ph = mtod(op_err, struct sctp_paramhdr *);
1399		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1400		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1401		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1402		    vrf_id, net->port);
1403		if (how_indx < sizeof(asoc->cookie_how))
1404			asoc->cookie_how[how_indx] = 2;
1405		return (NULL);
1406	}
1407	/*
1408	 * find and validate the INIT chunk in the cookie (peer's info) the
1409	 * INIT should start after the cookie-echo header struct (chunk
1410	 * header, state cookie header struct)
1411	 */
1412	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1413
1414	init_cp = (struct sctp_init_chunk *)
1415	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1416	    (uint8_t *) & init_buf);
1417	if (init_cp == NULL) {
1418		/* could not pull a INIT chunk in cookie */
1419		return (NULL);
1420	}
1421	chk_length = ntohs(init_cp->ch.chunk_length);
1422	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1423		return (NULL);
1424	}
1425	/*
1426	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1427	 * INIT-ACK follows the INIT chunk
1428	 */
1429	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1430	initack_cp = (struct sctp_init_ack_chunk *)
1431	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1432	    (uint8_t *) & initack_buf);
1433	if (initack_cp == NULL) {
1434		/* could not pull INIT-ACK chunk in cookie */
1435		return (NULL);
1436	}
1437	chk_length = ntohs(initack_cp->ch.chunk_length);
1438	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1439		return (NULL);
1440	}
1441	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1442	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1443		/*
1444		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1445		 * to get into the OPEN state
1446		 */
1447		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1448			/*-
1449			 * Opps, this means that we somehow generated two vtag's
1450			 * the same. I.e. we did:
1451			 *  Us               Peer
1452			 *   <---INIT(tag=a)------
1453			 *   ----INIT-ACK(tag=t)-->
1454			 *   ----INIT(tag=t)------> *1
1455			 *   <---INIT-ACK(tag=a)---
1456                         *   <----CE(tag=t)------------- *2
1457			 *
1458			 * At point *1 we should be generating a different
1459			 * tag t'. Which means we would throw away the CE and send
1460			 * ours instead. Basically this is case C (throw away side).
1461			 */
1462			if (how_indx < sizeof(asoc->cookie_how))
1463				asoc->cookie_how[how_indx] = 17;
1464			return (NULL);
1465
1466		}
1467		switch SCTP_GET_STATE
1468			(asoc) {
1469		case SCTP_STATE_COOKIE_WAIT:
1470		case SCTP_STATE_COOKIE_ECHOED:
1471			/*
1472			 * INIT was sent but got a COOKIE_ECHO with the
1473			 * correct tags... just accept it...but we must
1474			 * process the init so that we can make sure we have
1475			 * the right seq no's.
1476			 */
1477			/* First we must process the INIT !! */
1478			retval = sctp_process_init(init_cp, stcb, net);
1479			if (retval < 0) {
1480				if (how_indx < sizeof(asoc->cookie_how))
1481					asoc->cookie_how[how_indx] = 3;
1482				return (NULL);
1483			}
1484			/* we have already processed the INIT so no problem */
1485			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1486			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1487			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1488			/* update current state */
1489			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1490				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1491			else
1492				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1493
1494			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1495			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1496				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1497				    stcb->sctp_ep, stcb, asoc->primary_destination);
1498			}
1499			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1500			sctp_stop_all_cookie_timers(stcb);
1501			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1502			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1503			    (inp->sctp_socket->so_qlimit == 0)
1504			    ) {
1505#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1506				struct socket *so;
1507
1508#endif
1509				/*
1510				 * Here is where collision would go if we
1511				 * did a connect() and instead got a
1512				 * init/init-ack/cookie done before the
1513				 * init-ack came back..
1514				 */
1515				stcb->sctp_ep->sctp_flags |=
1516				    SCTP_PCB_FLAGS_CONNECTED;
1517#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1518				so = SCTP_INP_SO(stcb->sctp_ep);
1519				atomic_add_int(&stcb->asoc.refcnt, 1);
1520				SCTP_TCB_UNLOCK(stcb);
1521				SCTP_SOCKET_LOCK(so, 1);
1522				SCTP_TCB_LOCK(stcb);
1523				atomic_add_int(&stcb->asoc.refcnt, -1);
1524				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1525					SCTP_SOCKET_UNLOCK(so, 1);
1526					return (NULL);
1527				}
1528#endif
1529				soisconnected(stcb->sctp_socket);
1530#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1531				SCTP_SOCKET_UNLOCK(so, 1);
1532#endif
1533			}
1534			/* notify upper layer */
1535			*notification = SCTP_NOTIFY_ASSOC_UP;
1536			/*
1537			 * since we did not send a HB make sure we don't
1538			 * double things
1539			 */
1540			net->hb_responded = 1;
1541			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1542			    &cookie->time_entered, sctp_align_unsafe_makecopy);
1543
1544			if (stcb->asoc.sctp_autoclose_ticks &&
1545			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1546				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1547				    inp, stcb, NULL);
1548			}
1549			break;
1550		default:
1551			/*
1552			 * we're in the OPEN state (or beyond), so peer must
1553			 * have simply lost the COOKIE-ACK
1554			 */
1555			break;
1556			}	/* end switch */
1557		sctp_stop_all_cookie_timers(stcb);
1558		/*
1559		 * We ignore the return code here.. not sure if we should
1560		 * somehow abort.. but we do have an existing asoc. This
1561		 * really should not fail.
1562		 */
1563		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1564		    init_offset + sizeof(struct sctp_init_chunk),
1565		    initack_offset, sh, init_src)) {
1566			if (how_indx < sizeof(asoc->cookie_how))
1567				asoc->cookie_how[how_indx] = 4;
1568			return (NULL);
1569		}
1570		/* respond with a COOKIE-ACK */
1571		sctp_toss_old_cookies(stcb, asoc);
1572		sctp_send_cookie_ack(stcb);
1573		if (how_indx < sizeof(asoc->cookie_how))
1574			asoc->cookie_how[how_indx] = 5;
1575		return (stcb);
1576	}
1577	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1578	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1579	    cookie->tie_tag_my_vtag == 0 &&
1580	    cookie->tie_tag_peer_vtag == 0) {
1581		/*
1582		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1583		 */
1584		if (how_indx < sizeof(asoc->cookie_how))
1585			asoc->cookie_how[how_indx] = 6;
1586		return (NULL);
1587	}
1588	/*
1589	 * If nat support, and the below and stcb is established, send back
1590	 * a ABORT(colliding state) if we are established.
1591	 */
1592	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1593	    (asoc->peer_supports_nat) &&
1594	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1595	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1596	    (asoc->peer_vtag == 0)))) {
1597		/*
1598		 * Special case - Peer's support nat. We may have two init's
1599		 * that we gave out the same tag on since one was not
1600		 * established.. i.e. we get INIT from host-1 behind the nat
1601		 * and we respond tag-a, we get a INIT from host-2 behind
1602		 * the nat and we get tag-a again. Then we bring up host-1
1603		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1604		 * Now we have colliding state. We must send an abort here
1605		 * with colliding state indication.
1606		 */
1607		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1608		    0, M_DONTWAIT, 1, MT_DATA);
1609		if (op_err == NULL) {
1610			/* FOOBAR */
1611			return (NULL);
1612		}
1613		/* pre-reserve some space */
1614#ifdef INET6
1615		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1616#else
1617		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1618#endif
1619		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1620		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1621		/* Set the len */
1622		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1623		ph = mtod(op_err, struct sctp_paramhdr *);
1624		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
1625		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1626		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
1627		return (NULL);
1628	}
1629	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1630	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1631	    (asoc->peer_vtag == 0))) {
1632		/*
1633		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1634		 * should be ok, re-accept peer info
1635		 */
1636		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1637			/*
1638			 * Extension of case C. If we hit this, then the
1639			 * random number generator returned the same vtag
1640			 * when we first sent our INIT-ACK and when we later
1641			 * sent our INIT. The side with the seq numbers that
1642			 * are different will be the one that normnally
1643			 * would have hit case C. This in effect "extends"
1644			 * our vtags in this collision case to be 64 bits.
1645			 * The same collision could occur aka you get both
1646			 * vtag and seq number the same twice in a row.. but
1647			 * is much less likely. If it did happen then we
1648			 * would proceed through and bring up the assoc.. we
1649			 * may end up with the wrong stream setup however..
1650			 * which would be bad.. but there is no way to
1651			 * tell.. until we send on a stream that does not
1652			 * exist :-)
1653			 */
1654			if (how_indx < sizeof(asoc->cookie_how))
1655				asoc->cookie_how[how_indx] = 7;
1656
1657			return (NULL);
1658		}
1659		if (how_indx < sizeof(asoc->cookie_how))
1660			asoc->cookie_how[how_indx] = 8;
1661		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1662		sctp_stop_all_cookie_timers(stcb);
1663		/*
1664		 * since we did not send a HB make sure we don't double
1665		 * things
1666		 */
1667		net->hb_responded = 1;
1668		if (stcb->asoc.sctp_autoclose_ticks &&
1669		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1670			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1671			    NULL);
1672		}
1673		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1674		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1675
1676		/* Note last_cwr_tsn? where is this used? */
1677		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1678		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1679			/*
1680			 * Ok the peer probably discarded our data (if we
1681			 * echoed a cookie+data). So anything on the
1682			 * sent_queue should be marked for retransmit, we
1683			 * may not get something to kick us so it COULD
1684			 * still take a timeout to move these.. but it can't
1685			 * hurt to mark them.
1686			 */
1687			struct sctp_tmit_chunk *chk;
1688
1689			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1690				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1691					chk->sent = SCTP_DATAGRAM_RESEND;
1692					sctp_flight_size_decrease(chk);
1693					sctp_total_flight_decrease(stcb, chk);
1694					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1695					spec_flag++;
1696				}
1697			}
1698
1699		}
1700		/* process the INIT info (peer's info) */
1701		retval = sctp_process_init(init_cp, stcb, net);
1702		if (retval < 0) {
1703			if (how_indx < sizeof(asoc->cookie_how))
1704				asoc->cookie_how[how_indx] = 9;
1705			return (NULL);
1706		}
1707		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1708		    init_offset + sizeof(struct sctp_init_chunk),
1709		    initack_offset, sh, init_src)) {
1710			if (how_indx < sizeof(asoc->cookie_how))
1711				asoc->cookie_how[how_indx] = 10;
1712			return (NULL);
1713		}
1714		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1715		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1716			*notification = SCTP_NOTIFY_ASSOC_UP;
1717
1718			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1719			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1720			    (inp->sctp_socket->so_qlimit == 0)) {
1721#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1722				struct socket *so;
1723
1724#endif
1725				stcb->sctp_ep->sctp_flags |=
1726				    SCTP_PCB_FLAGS_CONNECTED;
1727#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1728				so = SCTP_INP_SO(stcb->sctp_ep);
1729				atomic_add_int(&stcb->asoc.refcnt, 1);
1730				SCTP_TCB_UNLOCK(stcb);
1731				SCTP_SOCKET_LOCK(so, 1);
1732				SCTP_TCB_LOCK(stcb);
1733				atomic_add_int(&stcb->asoc.refcnt, -1);
1734				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1735					SCTP_SOCKET_UNLOCK(so, 1);
1736					return (NULL);
1737				}
1738#endif
1739				soisconnected(stcb->sctp_socket);
1740#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1741				SCTP_SOCKET_UNLOCK(so, 1);
1742#endif
1743			}
1744			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1745				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1746			else
1747				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1748			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1749		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1750			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1751		} else {
1752			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1753		}
1754		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1755		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1756			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1757			    stcb->sctp_ep, stcb, asoc->primary_destination);
1758		}
1759		sctp_stop_all_cookie_timers(stcb);
1760		sctp_toss_old_cookies(stcb, asoc);
1761		sctp_send_cookie_ack(stcb);
1762		if (spec_flag) {
1763			/*
1764			 * only if we have retrans set do we do this. What
1765			 * this call does is get only the COOKIE-ACK out and
1766			 * then when we return the normal call to
1767			 * sctp_chunk_output will get the retrans out behind
1768			 * this.
1769			 */
1770			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1771		}
1772		if (how_indx < sizeof(asoc->cookie_how))
1773			asoc->cookie_how[how_indx] = 11;
1774
1775		return (stcb);
1776	}
1777	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1778	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1779	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1780	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1781	    cookie->tie_tag_peer_vtag != 0) {
1782		struct sctpasochead *head;
1783
1784		if (asoc->peer_supports_nat) {
1785			/*
1786			 * This is a gross gross hack. just call the
1787			 * cookie_new code since we are allowing a duplicate
1788			 * association. I hope this works...
1789			 */
1790			return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
1791			    inp, netp, init_src, notification,
1792			    auth_skipped, auth_offset, auth_len,
1793			    vrf_id, port));
1794		}
1795		/*
1796		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1797		 */
1798		/* temp code */
1799		if (how_indx < sizeof(asoc->cookie_how))
1800			asoc->cookie_how[how_indx] = 12;
1801		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1802		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1803
1804		*sac_assoc_id = sctp_get_associd(stcb);
1805		/* notify upper layer */
1806		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1807		atomic_add_int(&stcb->asoc.refcnt, 1);
1808		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1809		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1810		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1811			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1812		}
1813		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1814			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1815		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1816			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1817		}
1818		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1819			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1820			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1821			    stcb->sctp_ep, stcb, asoc->primary_destination);
1822
1823		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1824			/* move to OPEN state, if not in SHUTDOWN_SENT */
1825			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1826		}
1827		asoc->pre_open_streams =
1828		    ntohs(initack_cp->init.num_outbound_streams);
1829		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1830		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1831		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1832
1833		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1834		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1835
1836		asoc->str_reset_seq_in = asoc->init_seq_number;
1837
1838		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1839		if (asoc->mapping_array) {
1840			memset(asoc->mapping_array, 0,
1841			    asoc->mapping_array_size);
1842		}
1843		if (asoc->nr_mapping_array) {
1844			memset(asoc->nr_mapping_array, 0,
1845			    asoc->mapping_array_size);
1846		}
1847		SCTP_TCB_UNLOCK(stcb);
1848		SCTP_INP_INFO_WLOCK();
1849		SCTP_INP_WLOCK(stcb->sctp_ep);
1850		SCTP_TCB_LOCK(stcb);
1851		atomic_add_int(&stcb->asoc.refcnt, -1);
1852		/* send up all the data */
1853		SCTP_TCB_SEND_LOCK(stcb);
1854
1855		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1856		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1857			stcb->asoc.strmout[i].stream_no = i;
1858			stcb->asoc.strmout[i].next_sequence_sent = 0;
1859			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1860		}
1861		/* process the INIT-ACK info (my info) */
1862		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1863		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1864
1865		/* pull from vtag hash */
1866		LIST_REMOVE(stcb, sctp_asocs);
1867		/* re-insert to new vtag position */
1868		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1869		    SCTP_BASE_INFO(hashasocmark))];
1870		/*
1871		 * put it in the bucket in the vtag hash of assoc's for the
1872		 * system
1873		 */
1874		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1875
1876		/* process the INIT info (peer's info) */
1877		SCTP_TCB_SEND_UNLOCK(stcb);
1878		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1879		SCTP_INP_INFO_WUNLOCK();
1880
1881		retval = sctp_process_init(init_cp, stcb, net);
1882		if (retval < 0) {
1883			if (how_indx < sizeof(asoc->cookie_how))
1884				asoc->cookie_how[how_indx] = 13;
1885
1886			return (NULL);
1887		}
1888		/*
1889		 * since we did not send a HB make sure we don't double
1890		 * things
1891		 */
1892		net->hb_responded = 1;
1893
1894		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1895		    init_offset + sizeof(struct sctp_init_chunk),
1896		    initack_offset, sh, init_src)) {
1897			if (how_indx < sizeof(asoc->cookie_how))
1898				asoc->cookie_how[how_indx] = 14;
1899
1900			return (NULL);
1901		}
1902		/* respond with a COOKIE-ACK */
1903		sctp_stop_all_cookie_timers(stcb);
1904		sctp_toss_old_cookies(stcb, asoc);
1905		sctp_send_cookie_ack(stcb);
1906		if (how_indx < sizeof(asoc->cookie_how))
1907			asoc->cookie_how[how_indx] = 15;
1908
1909		return (stcb);
1910	}
1911	if (how_indx < sizeof(asoc->cookie_how))
1912		asoc->cookie_how[how_indx] = 16;
1913	/* all other cases... */
1914	return (NULL);
1915}
1916
1917
1918/*
1919 * handle a state cookie for a new association m: input packet mbuf chain--
1920 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1921 * and the cookie signature does not exist offset: offset into mbuf to the
1922 * cookie-echo chunk length: length of the cookie chunk to: where the init
1923 * was from returns a new TCB
1924 */
1925struct sctp_tcb *
1926sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1927    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1928    struct sctp_inpcb *inp, struct sctp_nets **netp,
1929    struct sockaddr *init_src, int *notification,
1930    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1931    uint32_t vrf_id, uint16_t port)
1932{
1933	struct sctp_tcb *stcb;
1934	struct sctp_init_chunk *init_cp, init_buf;
1935	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1936	struct sockaddr_storage sa_store;
1937	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1938	struct sockaddr_in *sin;
1939	struct sockaddr_in6 *sin6;
1940	struct sctp_association *asoc;
1941	int chk_length;
1942	int init_offset, initack_offset, initack_limit;
1943	int retval;
1944	int error = 0;
1945	uint32_t old_tag;
1946	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1947
1948#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1949	struct socket *so;
1950
1951	so = SCTP_INP_SO(inp);
1952#endif
1953
1954	/*
1955	 * find and validate the INIT chunk in the cookie (peer's info) the
1956	 * INIT should start after the cookie-echo header struct (chunk
1957	 * header, state cookie header struct)
1958	 */
1959	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1960	init_cp = (struct sctp_init_chunk *)
1961	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1962	    (uint8_t *) & init_buf);
1963	if (init_cp == NULL) {
1964		/* could not pull a INIT chunk in cookie */
1965		SCTPDBG(SCTP_DEBUG_INPUT1,
1966		    "process_cookie_new: could not pull INIT chunk hdr\n");
1967		return (NULL);
1968	}
1969	chk_length = ntohs(init_cp->ch.chunk_length);
1970	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1971		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1972		return (NULL);
1973	}
1974	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1975	/*
1976	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1977	 * INIT-ACK follows the INIT chunk
1978	 */
1979	initack_cp = (struct sctp_init_ack_chunk *)
1980	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1981	    (uint8_t *) & initack_buf);
1982	if (initack_cp == NULL) {
1983		/* could not pull INIT-ACK chunk in cookie */
1984		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1985		return (NULL);
1986	}
1987	chk_length = ntohs(initack_cp->ch.chunk_length);
1988	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1989		return (NULL);
1990	}
1991	/*
1992	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1993	 * "initack_limit" value.  This is because the chk_length field
1994	 * includes the length of the cookie, but the cookie is omitted when
1995	 * the INIT and INIT_ACK are tacked onto the cookie...
1996	 */
1997	initack_limit = offset + cookie_len;
1998
1999	/*
2000	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
2001	 * and popluate
2002	 */
2003
2004	/*
2005	 * Here we do a trick, we set in NULL for the proc/thread argument.
2006	 * We do this since in effect we only use the p argument when the
2007	 * socket is unbound and we must do an implicit bind. Since we are
2008	 * getting a cookie, we cannot be unbound.
2009	 */
2010	stcb = sctp_aloc_assoc(inp, init_src, &error,
2011	    ntohl(initack_cp->init.initiate_tag), vrf_id,
2012	    (struct thread *)NULL
2013	    );
2014	if (stcb == NULL) {
2015		struct mbuf *op_err;
2016
2017		/* memory problem? */
2018		SCTPDBG(SCTP_DEBUG_INPUT1,
2019		    "process_cookie_new: no room for another TCB!\n");
2020		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2021
2022		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2023		    sh, op_err, vrf_id, port);
2024		return (NULL);
2025	}
2026	/* get the correct sctp_nets */
2027	if (netp)
2028		*netp = sctp_findnet(stcb, init_src);
2029
2030	asoc = &stcb->asoc;
2031	/* get scope variables out of cookie */
2032	asoc->ipv4_local_scope = cookie->ipv4_scope;
2033	asoc->site_scope = cookie->site_scope;
2034	asoc->local_scope = cookie->local_scope;
2035	asoc->loopback_scope = cookie->loopback_scope;
2036
2037	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2038	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2039		struct mbuf *op_err;
2040
2041		/*
2042		 * Houston we have a problem. The EP changed while the
2043		 * cookie was in flight. Only recourse is to abort the
2044		 * association.
2045		 */
2046		atomic_add_int(&stcb->asoc.refcnt, 1);
2047		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2048		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2049		    sh, op_err, vrf_id, port);
2050#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2051		SCTP_TCB_UNLOCK(stcb);
2052		SCTP_SOCKET_LOCK(so, 1);
2053		SCTP_TCB_LOCK(stcb);
2054#endif
2055		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2056		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2057#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2058		SCTP_SOCKET_UNLOCK(so, 1);
2059#endif
2060		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2061		return (NULL);
2062	}
2063	/* process the INIT-ACK info (my info) */
2064	old_tag = asoc->my_vtag;
2065	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2066	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2067	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
2068	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2069	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2070	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2071	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
2072	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2073	asoc->str_reset_seq_in = asoc->init_seq_number;
2074
2075	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2076
2077	/* process the INIT info (peer's info) */
2078	if (netp)
2079		retval = sctp_process_init(init_cp, stcb, *netp);
2080	else
2081		retval = 0;
2082	if (retval < 0) {
2083		atomic_add_int(&stcb->asoc.refcnt, 1);
2084#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2085		SCTP_TCB_UNLOCK(stcb);
2086		SCTP_SOCKET_LOCK(so, 1);
2087		SCTP_TCB_LOCK(stcb);
2088#endif
2089		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2090#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2091		SCTP_SOCKET_UNLOCK(so, 1);
2092#endif
2093		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2094		return (NULL);
2095	}
2096	/* load all addresses */
2097	if (sctp_load_addresses_from_init(stcb, m, iphlen,
2098	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
2099	    init_src)) {
2100		atomic_add_int(&stcb->asoc.refcnt, 1);
2101#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2102		SCTP_TCB_UNLOCK(stcb);
2103		SCTP_SOCKET_LOCK(so, 1);
2104		SCTP_TCB_LOCK(stcb);
2105#endif
2106		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
2107#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2108		SCTP_SOCKET_UNLOCK(so, 1);
2109#endif
2110		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2111		return (NULL);
2112	}
2113	/*
2114	 * verify any preceding AUTH chunk that was skipped
2115	 */
2116	/* pull the local authentication parameters from the cookie/init-ack */
2117	sctp_auth_get_cookie_params(stcb, m,
2118	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2119	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2120	if (auth_skipped) {
2121		struct sctp_auth_chunk *auth;
2122
2123		auth = (struct sctp_auth_chunk *)
2124		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2125		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2126			/* auth HMAC failed, dump the assoc and packet */
2127			SCTPDBG(SCTP_DEBUG_AUTH1,
2128			    "COOKIE-ECHO: AUTH failed\n");
2129			atomic_add_int(&stcb->asoc.refcnt, 1);
2130#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2131			SCTP_TCB_UNLOCK(stcb);
2132			SCTP_SOCKET_LOCK(so, 1);
2133			SCTP_TCB_LOCK(stcb);
2134#endif
2135			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
2136#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2137			SCTP_SOCKET_UNLOCK(so, 1);
2138#endif
2139			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2140			return (NULL);
2141		} else {
2142			/* remaining chunks checked... good to go */
2143			stcb->asoc.authenticated = 1;
2144		}
2145	}
2146	/* update current state */
2147	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2148	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2149	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2150		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2151		    stcb->sctp_ep, stcb, asoc->primary_destination);
2152	}
2153	sctp_stop_all_cookie_timers(stcb);
2154	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2155	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2156
2157	/*
2158	 * if we're doing ASCONFs, check to see if we have any new local
2159	 * addresses that need to get added to the peer (eg. addresses
2160	 * changed while cookie echo in flight).  This needs to be done
2161	 * after we go to the OPEN state to do the correct asconf
2162	 * processing. else, make sure we have the correct addresses in our
2163	 * lists
2164	 */
2165
2166	/* warning, we re-use sin, sin6, sa_store here! */
2167	/* pull in local_address (our "from" address) */
2168	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
2169		/* source addr is IPv4 */
2170		sin = (struct sockaddr_in *)initack_src;
2171		memset(sin, 0, sizeof(*sin));
2172		sin->sin_family = AF_INET;
2173		sin->sin_len = sizeof(struct sockaddr_in);
2174		sin->sin_addr.s_addr = cookie->laddress[0];
2175	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
2176		/* source addr is IPv6 */
2177		sin6 = (struct sockaddr_in6 *)initack_src;
2178		memset(sin6, 0, sizeof(*sin6));
2179		sin6->sin6_family = AF_INET6;
2180		sin6->sin6_len = sizeof(struct sockaddr_in6);
2181		sin6->sin6_scope_id = cookie->scope_id;
2182		memcpy(&sin6->sin6_addr, cookie->laddress,
2183		    sizeof(sin6->sin6_addr));
2184	} else {
2185		atomic_add_int(&stcb->asoc.refcnt, 1);
2186#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2187		SCTP_TCB_UNLOCK(stcb);
2188		SCTP_SOCKET_LOCK(so, 1);
2189		SCTP_TCB_LOCK(stcb);
2190#endif
2191		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2192#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2193		SCTP_SOCKET_UNLOCK(so, 1);
2194#endif
2195		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2196		return (NULL);
2197	}
2198
2199	/* set up to notify upper layer */
2200	*notification = SCTP_NOTIFY_ASSOC_UP;
2201	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2202	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2203	    (inp->sctp_socket->so_qlimit == 0)) {
2204		/*
2205		 * This is an endpoint that called connect() how it got a
2206		 * cookie that is NEW is a bit of a mystery. It must be that
2207		 * the INIT was sent, but before it got there.. a complete
2208		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
2209		 * should have went to the other code.. not here.. oh well..
2210		 * a bit of protection is worth having..
2211		 */
2212		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2213#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2214		atomic_add_int(&stcb->asoc.refcnt, 1);
2215		SCTP_TCB_UNLOCK(stcb);
2216		SCTP_SOCKET_LOCK(so, 1);
2217		SCTP_TCB_LOCK(stcb);
2218		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2219		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2220			SCTP_SOCKET_UNLOCK(so, 1);
2221			return (NULL);
2222		}
2223#endif
2224		soisconnected(stcb->sctp_socket);
2225#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2226		SCTP_SOCKET_UNLOCK(so, 1);
2227#endif
2228	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2229	    (inp->sctp_socket->so_qlimit)) {
2230		/*
2231		 * We don't want to do anything with this one. Since it is
2232		 * the listening guy. The timer will get started for
2233		 * accepted connections in the caller.
2234		 */
2235		;
2236	}
2237	/* since we did not send a HB make sure we don't double things */
2238	if ((netp) && (*netp))
2239		(*netp)->hb_responded = 1;
2240
2241	if (stcb->asoc.sctp_autoclose_ticks &&
2242	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2243		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2244	}
2245	/* calculate the RTT */
2246	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2247	if ((netp) && (*netp)) {
2248		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
2249		    &cookie->time_entered, sctp_align_unsafe_makecopy);
2250	}
2251	/* respond with a COOKIE-ACK */
2252	sctp_send_cookie_ack(stcb);
2253
2254	/*
2255	 * check the address lists for any ASCONFs that need to be sent
2256	 * AFTER the cookie-ack is sent
2257	 */
2258	sctp_check_address_list(stcb, m,
2259	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2260	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2261	    initack_src, cookie->local_scope, cookie->site_scope,
2262	    cookie->ipv4_scope, cookie->loopback_scope);
2263
2264
2265	return (stcb);
2266}
2267
2268/*
2269 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
2270 * we NEED to make sure we are not already using the vtag. If so we
2271 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2272	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2273							    SCTP_BASE_INFO(hashasocmark))];
2274	LIST_FOREACH(stcb, head, sctp_asocs) {
2275	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2276		       -- SEND ABORT - TRY AGAIN --
2277		}
2278	}
2279*/
2280
2281/*
2282 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2283 * existing (non-NULL) TCB
2284 */
2285static struct mbuf *
2286sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2287    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2288    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2289    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2290    struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
2291{
2292	struct sctp_state_cookie *cookie;
2293	struct sockaddr_in6 sin6;
2294	struct sockaddr_in sin;
2295	struct sctp_tcb *l_stcb = *stcb;
2296	struct sctp_inpcb *l_inp;
2297	struct sockaddr *to;
2298	sctp_assoc_t sac_restart_id;
2299	struct sctp_pcb *ep;
2300	struct mbuf *m_sig;
2301	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2302	uint8_t *sig;
2303	uint8_t cookie_ok = 0;
2304	unsigned int size_of_pkt, sig_offset, cookie_offset;
2305	unsigned int cookie_len;
2306	struct timeval now;
2307	struct timeval time_expires;
2308	struct sockaddr_storage dest_store;
2309	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2310	struct ip *iph;
2311	int notification = 0;
2312	struct sctp_nets *netl;
2313	int had_a_existing_tcb = 0;
2314
2315	SCTPDBG(SCTP_DEBUG_INPUT2,
2316	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2317
2318	if (inp_p == NULL) {
2319		return (NULL);
2320	}
2321	/* First get the destination address setup too. */
2322	iph = mtod(m, struct ip *);
2323	switch (iph->ip_v) {
2324	case IPVERSION:
2325		{
2326			/* its IPv4 */
2327			struct sockaddr_in *lsin;
2328
2329			lsin = (struct sockaddr_in *)(localep_sa);
2330			memset(lsin, 0, sizeof(*lsin));
2331			lsin->sin_family = AF_INET;
2332			lsin->sin_len = sizeof(*lsin);
2333			lsin->sin_port = sh->dest_port;
2334			lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2335			size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2336			break;
2337		}
2338#ifdef INET6
2339	case IPV6_VERSION >> 4:
2340		{
2341			/* its IPv6 */
2342			struct ip6_hdr *ip6;
2343			struct sockaddr_in6 *lsin6;
2344
2345			lsin6 = (struct sockaddr_in6 *)(localep_sa);
2346			memset(lsin6, 0, sizeof(*lsin6));
2347			lsin6->sin6_family = AF_INET6;
2348			lsin6->sin6_len = sizeof(struct sockaddr_in6);
2349			ip6 = mtod(m, struct ip6_hdr *);
2350			lsin6->sin6_port = sh->dest_port;
2351			lsin6->sin6_addr = ip6->ip6_dst;
2352			size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2353			break;
2354		}
2355#endif
2356	default:
2357		return (NULL);
2358	}
2359
2360	cookie = &cp->cookie;
2361	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2362	cookie_len = ntohs(cp->ch.chunk_length);
2363
2364	if ((cookie->peerport != sh->src_port) &&
2365	    (cookie->myport != sh->dest_port) &&
2366	    (cookie->my_vtag != sh->v_tag)) {
2367		/*
2368		 * invalid ports or bad tag.  Note that we always leave the
2369		 * v_tag in the header in network order and when we stored
2370		 * it in the my_vtag slot we also left it in network order.
2371		 * This maintains the match even though it may be in the
2372		 * opposite byte order of the machine :->
2373		 */
2374		return (NULL);
2375	}
2376	if (cookie_len > size_of_pkt ||
2377	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2378	    sizeof(struct sctp_init_chunk) +
2379	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2380		/* cookie too long!  or too small */
2381		return (NULL);
2382	}
2383	/*
2384	 * split off the signature into its own mbuf (since it should not be
2385	 * calculated in the sctp_hmac_m() call).
2386	 */
2387	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2388	if (sig_offset > size_of_pkt) {
2389		/* packet not correct size! */
2390		/* XXX this may already be accounted for earlier... */
2391		return (NULL);
2392	}
2393	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2394	if (m_sig == NULL) {
2395		/* out of memory or ?? */
2396		return (NULL);
2397	}
2398#ifdef SCTP_MBUF_LOGGING
2399	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2400		struct mbuf *mat;
2401
2402		mat = m_sig;
2403		while (mat) {
2404			if (SCTP_BUF_IS_EXTENDED(mat)) {
2405				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2406			}
2407			mat = SCTP_BUF_NEXT(mat);
2408		}
2409	}
2410#endif
2411
2412	/*
2413	 * compute the signature/digest for the cookie
2414	 */
2415	ep = &(*inp_p)->sctp_ep;
2416	l_inp = *inp_p;
2417	if (l_stcb) {
2418		SCTP_TCB_UNLOCK(l_stcb);
2419	}
2420	SCTP_INP_RLOCK(l_inp);
2421	if (l_stcb) {
2422		SCTP_TCB_LOCK(l_stcb);
2423	}
2424	/* which cookie is it? */
2425	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2426	    (ep->current_secret_number != ep->last_secret_number)) {
2427		/* it's the old cookie */
2428		(void)sctp_hmac_m(SCTP_HMAC,
2429		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2430		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2431	} else {
2432		/* it's the current cookie */
2433		(void)sctp_hmac_m(SCTP_HMAC,
2434		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2435		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2436	}
2437	/* get the signature */
2438	SCTP_INP_RUNLOCK(l_inp);
2439	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2440	if (sig == NULL) {
2441		/* couldn't find signature */
2442		sctp_m_freem(m_sig);
2443		return (NULL);
2444	}
2445	/* compare the received digest with the computed digest */
2446	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2447		/* try the old cookie? */
2448		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2449		    (ep->current_secret_number != ep->last_secret_number)) {
2450			/* compute digest with old */
2451			(void)sctp_hmac_m(SCTP_HMAC,
2452			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2453			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2454			/* compare */
2455			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2456				cookie_ok = 1;
2457		}
2458	} else {
2459		cookie_ok = 1;
2460	}
2461
2462	/*
2463	 * Now before we continue we must reconstruct our mbuf so that
2464	 * normal processing of any other chunks will work.
2465	 */
2466	{
2467		struct mbuf *m_at;
2468
2469		m_at = m;
2470		while (SCTP_BUF_NEXT(m_at) != NULL) {
2471			m_at = SCTP_BUF_NEXT(m_at);
2472		}
2473		SCTP_BUF_NEXT(m_at) = m_sig;
2474	}
2475
2476	if (cookie_ok == 0) {
2477		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2478		SCTPDBG(SCTP_DEBUG_INPUT2,
2479		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2480		    (uint32_t) offset, cookie_offset, sig_offset);
2481		return (NULL);
2482	}
2483	/*
2484	 * check the cookie timestamps to be sure it's not stale
2485	 */
2486	(void)SCTP_GETTIME_TIMEVAL(&now);
2487	/* Expire time is in Ticks, so we convert to seconds */
2488	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2489	time_expires.tv_usec = cookie->time_entered.tv_usec;
2490	/*
2491	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2492	 * is undefined.
2493	 */
2494	if (timevalcmp(&now, &time_expires, >)) {
2495		/* cookie is stale! */
2496		struct mbuf *op_err;
2497		struct sctp_stale_cookie_msg *scm;
2498		uint32_t tim;
2499
2500		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2501		    0, M_DONTWAIT, 1, MT_DATA);
2502		if (op_err == NULL) {
2503			/* FOOBAR */
2504			return (NULL);
2505		}
2506		/* Set the len */
2507		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2508		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2509		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2510		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2511		    (sizeof(uint32_t))));
2512		/* seconds to usec */
2513		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2514		/* add in usec */
2515		if (tim == 0)
2516			tim = now.tv_usec - cookie->time_entered.tv_usec;
2517		scm->time_usec = htonl(tim);
2518		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2519		    vrf_id, port);
2520		return (NULL);
2521	}
2522	/*
2523	 * Now we must see with the lookup address if we have an existing
2524	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2525	 * and a INIT collided with us and somewhere the peer sent the
2526	 * cookie on another address besides the single address our assoc
2527	 * had for him. In this case we will have one of the tie-tags set at
2528	 * least AND the address field in the cookie can be used to look it
2529	 * up.
2530	 */
2531	to = NULL;
2532	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2533		memset(&sin6, 0, sizeof(sin6));
2534		sin6.sin6_family = AF_INET6;
2535		sin6.sin6_len = sizeof(sin6);
2536		sin6.sin6_port = sh->src_port;
2537		sin6.sin6_scope_id = cookie->scope_id;
2538		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2539		    sizeof(sin6.sin6_addr.s6_addr));
2540		to = (struct sockaddr *)&sin6;
2541	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2542		memset(&sin, 0, sizeof(sin));
2543		sin.sin_family = AF_INET;
2544		sin.sin_len = sizeof(sin);
2545		sin.sin_port = sh->src_port;
2546		sin.sin_addr.s_addr = cookie->address[0];
2547		to = (struct sockaddr *)&sin;
2548	} else {
2549		/* This should not happen */
2550		return (NULL);
2551	}
2552	if ((*stcb == NULL) && to) {
2553		/* Yep, lets check */
2554		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2555		if (*stcb == NULL) {
2556			/*
2557			 * We should have only got back the same inp. If we
2558			 * got back a different ep we have a problem. The
2559			 * original findep got back l_inp and now
2560			 */
2561			if (l_inp != *inp_p) {
2562				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2563			}
2564		} else {
2565			if (*locked_tcb == NULL) {
2566				/*
2567				 * In this case we found the assoc only
2568				 * after we locked the create lock. This
2569				 * means we are in a colliding case and we
2570				 * must make sure that we unlock the tcb if
2571				 * its one of the cases where we throw away
2572				 * the incoming packets.
2573				 */
2574				*locked_tcb = *stcb;
2575
2576				/*
2577				 * We must also increment the inp ref count
2578				 * since the ref_count flags was set when we
2579				 * did not find the TCB, now we found it
2580				 * which reduces the refcount.. we must
2581				 * raise it back out to balance it all :-)
2582				 */
2583				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2584				if ((*stcb)->sctp_ep != l_inp) {
2585					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2586					    (*stcb)->sctp_ep, l_inp);
2587				}
2588			}
2589		}
2590	}
2591	if (to == NULL) {
2592		return (NULL);
2593	}
2594	cookie_len -= SCTP_SIGNATURE_SIZE;
2595	if (*stcb == NULL) {
2596		/* this is the "normal" case... get a new TCB */
2597		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2598		    cookie_len, *inp_p, netp, to, &notification,
2599		    auth_skipped, auth_offset, auth_len, vrf_id, port);
2600	} else {
2601		/* this is abnormal... cookie-echo on existing TCB */
2602		had_a_existing_tcb = 1;
2603		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2604		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2605		    &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
2606	}
2607
2608	if (*stcb == NULL) {
2609		/* still no TCB... must be bad cookie-echo */
2610		return (NULL);
2611	}
2612	/*
2613	 * Ok, we built an association so confirm the address we sent the
2614	 * INIT-ACK to.
2615	 */
2616	netl = sctp_findnet(*stcb, to);
2617	/*
2618	 * This code should in theory NOT run but
2619	 */
2620	if (netl == NULL) {
2621		/* TSNH! Huh, why do I need to add this address here? */
2622		int ret;
2623
2624		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2625		    SCTP_IN_COOKIE_PROC);
2626		netl = sctp_findnet(*stcb, to);
2627	}
2628	if (netl) {
2629		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2630			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2631			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2632			    netl);
2633			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2634			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2635		}
2636	}
2637	if (*stcb) {
2638		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2639		    *stcb, NULL);
2640	}
2641	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2642		if (!had_a_existing_tcb ||
2643		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2644			/*
2645			 * If we have a NEW cookie or the connect never
2646			 * reached the connected state during collision we
2647			 * must do the TCP accept thing.
2648			 */
2649			struct socket *so, *oso;
2650			struct sctp_inpcb *inp;
2651
2652			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2653				/*
2654				 * For a restart we will keep the same
2655				 * socket, no need to do anything. I THINK!!
2656				 */
2657				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2658				return (m);
2659			}
2660			oso = (*inp_p)->sctp_socket;
2661			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2662			SCTP_TCB_UNLOCK((*stcb));
2663			so = sonewconn(oso, 0
2664			    );
2665			SCTP_TCB_LOCK((*stcb));
2666			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2667
2668			if (so == NULL) {
2669				struct mbuf *op_err;
2670
2671#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2672				struct socket *pcb_so;
2673
2674#endif
2675				/* Too many sockets */
2676				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2677				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2678				sctp_abort_association(*inp_p, NULL, m, iphlen,
2679				    sh, op_err, vrf_id, port);
2680#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2681				pcb_so = SCTP_INP_SO(*inp_p);
2682				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2683				SCTP_TCB_UNLOCK((*stcb));
2684				SCTP_SOCKET_LOCK(pcb_so, 1);
2685				SCTP_TCB_LOCK((*stcb));
2686				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2687#endif
2688				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2689#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2690				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2691#endif
2692				return (NULL);
2693			}
2694			inp = (struct sctp_inpcb *)so->so_pcb;
2695			SCTP_INP_INCR_REF(inp);
2696			/*
2697			 * We add the unbound flag here so that if we get an
2698			 * soabort() before we get the move_pcb done, we
2699			 * will properly cleanup.
2700			 */
2701			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2702			    SCTP_PCB_FLAGS_CONNECTED |
2703			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2704			    SCTP_PCB_FLAGS_UNBOUND |
2705			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2706			    SCTP_PCB_FLAGS_DONT_WAKE);
2707			inp->sctp_features = (*inp_p)->sctp_features;
2708			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2709			inp->sctp_socket = so;
2710			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2711			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2712			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2713			inp->sctp_context = (*inp_p)->sctp_context;
2714			inp->inp_starting_point_for_iterator = NULL;
2715			/*
2716			 * copy in the authentication parameters from the
2717			 * original endpoint
2718			 */
2719			if (inp->sctp_ep.local_hmacs)
2720				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2721			inp->sctp_ep.local_hmacs =
2722			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2723			if (inp->sctp_ep.local_auth_chunks)
2724				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2725			inp->sctp_ep.local_auth_chunks =
2726			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2727
2728			/*
2729			 * Now we must move it from one hash table to
2730			 * another and get the tcb in the right place.
2731			 */
2732
2733			/*
2734			 * This is where the one-2-one socket is put into
2735			 * the accept state waiting for the accept!
2736			 */
2737			if (*stcb) {
2738				(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
2739			}
2740			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2741
2742			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2743			SCTP_TCB_UNLOCK((*stcb));
2744
2745			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2746			    0);
2747			SCTP_TCB_LOCK((*stcb));
2748			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2749
2750
2751			/*
2752			 * now we must check to see if we were aborted while
2753			 * the move was going on and the lock/unlock
2754			 * happened.
2755			 */
2756			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2757				/*
2758				 * yep it was, we leave the assoc attached
2759				 * to the socket since the sctp_inpcb_free()
2760				 * call will send an abort for us.
2761				 */
2762				SCTP_INP_DECR_REF(inp);
2763				return (NULL);
2764			}
2765			SCTP_INP_DECR_REF(inp);
2766			/* Switch over to the new guy */
2767			*inp_p = inp;
2768			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2769
2770			/*
2771			 * Pull it from the incomplete queue and wake the
2772			 * guy
2773			 */
2774#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2775			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2776			SCTP_TCB_UNLOCK((*stcb));
2777			SCTP_SOCKET_LOCK(so, 1);
2778#endif
2779			soisconnected(so);
2780#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2781			SCTP_TCB_LOCK((*stcb));
2782			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2783			SCTP_SOCKET_UNLOCK(so, 1);
2784#endif
2785			return (m);
2786		}
2787	}
2788	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2789		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2790	}
2791	return (m);
2792}
2793
/*
 * Handle a received COOKIE-ACK chunk.
 *
 * Moves the association from COOKIE-ECHOED to OPEN, computes an initial
 * RTO sample from the COOKIE-ECHO round trip, notifies the ULP that the
 * association is up, and (re)starts heartbeat/autoclose timers.  Also
 * kicks out any ASCONFs that queued up while the handshake was in
 * flight, and finally tosses the cached cookie.
 *
 * NOTE(review): cp must not be touched — other code paths call this
 * without an actual COOKIE-ACK chunk in hand (see original comment).
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			/* a shutdown was requested during the handshake */
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/*
			 * No retransmissions happened, so the
			 * COOKIE-ECHO round trip is a valid RTO sample.
			 */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock ordering: drop the TCB lock while taking
			 * the socket lock; hold a refcount so the TCB
			 * cannot go away in between.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket was closed while we dropped the lock */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
2893
/*
 * Handle a received ECN-Echo (ECNE) chunk.
 *
 * Resynchronizes the ECN nonce state, finds the destination the echoed
 * TSN was originally sent to (falling back to the primary destination),
 * reduces cwnd via the pluggable congestion-control module — at most
 * once per RTT, gated by last_cwr_tsn — and always answers with a CWR
 * so the peer learns the reduction happened even if a prior CWR was
 * lost.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	uint32_t tsn;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
		/* malformed ECNE; ignore */
		return;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
	/* Also we make sure we disable the nonce_wait */
	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (lchk == NULL) {
		/* nothing queued; resync at the next TSN we will assign */
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;

	/* Find where it was sent, if possible */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			break;
		}
		/* sent_queue is TSN-ordered; past tsn means it is not there */
		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_TSN))
			break;
	}
	if (net == NULL)
		/* default is we use the primary */
		net = stcb->asoc.primary_destination;

	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
		/*
		 * we reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the resync_tsn.
		 */
		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer.
	 */
	sctp_send_cwr(stcb, net, tsn);
}
2951
2952static void
2953sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2954{
2955	/*
2956	 * Here we get a CWR from the peer. We must look in the outqueue and
2957	 * make sure that we have a covered ECNE in teh control chunk part.
2958	 * If so remove it.
2959	 */
2960	struct sctp_tmit_chunk *chk;
2961	struct sctp_ecne_chunk *ecne;
2962
2963	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2964		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2965			continue;
2966		}
2967		/*
2968		 * Look for and remove if it is the right TSN. Since there
2969		 * is only ONE ECNE on the control queue at any one time we
2970		 * don't need to worry about more than one!
2971		 */
2972		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2973		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2974		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2975			/* this covers this ECNE, we can remove it */
2976			stcb->asoc.ecn_echo_cnt_onq--;
2977			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2978			    sctp_next);
2979			if (chk->data) {
2980				sctp_m_freem(chk->data);
2981				chk->data = NULL;
2982			}
2983			stcb->asoc.ctrl_queue_cnt--;
2984			sctp_free_a_chunk(stcb, chk);
2985			break;
2986		}
2987	}
2988}
2989
/*
 * Handle a received SHUTDOWN-COMPLETE chunk.
 *
 * Only meaningful in SHUTDOWN-ACK-SENT state (otherwise it is silently
 * ignored, per the state check below).  Notifies the ULP that the
 * association is down, reports any stranded outbound data, stops the
 * SHUTDOWN-ACK timer and frees the TCB.
 *
 * Note: this function consumes the TCB lock — it either unlocks stcb
 * on the ignore path or destroys the association entirely.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock before freeing the assoc; the refcount
	 * keeps the TCB alive across the unlock/relock window.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3045
/*
 * Process one chunk descriptor from a PACKET-DROPPED report.
 *
 * A middlebox (or the peer) told us it dropped a packet containing the
 * chunk described by desc; depending on the chunk type we mark data for
 * retransmission or directly resend the control chunk.
 *
 * Returns 0 on success/no-op, -1 when the descriptor's data bytes do
 * not match our copy (bad report), or the result of further processing.
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the dropped TSN on the sent queue and mark it for resend */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					/* the peer itself dropped it; gap-acks will handle it */
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					/* peer has no receive window; don't resend */
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					/*
					 * Verify the reported payload bytes
					 * against our queued copy; a mismatch
					 * means the report is bogus.
					 */
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}
				/*
				 * We zero out the nonce so resync not
				 * needed
				 */
				tp1->rec.data.ect_nonce = 0;

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/* audit code */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			/* mark the queued ASCONF (if any) for retransmission */
			struct sctp_tmit_chunk *asconf;

			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			/* mark the queued COOKIE-ECHO (if any) for retransmission */
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3286
3287void
3288sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3289{
3290	int i;
3291	uint16_t temp;
3292
3293	/*
3294	 * We set things to 0xffff since this is the last delivered sequence
3295	 * and we will be sending in 0 after the reset.
3296	 */
3297
3298	if (number_entries) {
3299		for (i = 0; i < number_entries; i++) {
3300			temp = ntohs(list[i]);
3301			if (temp >= stcb->asoc.streamincnt) {
3302				continue;
3303			}
3304			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3305		}
3306	} else {
3307		list = NULL;
3308		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3309			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3310		}
3311	}
3312	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3313}
3314
3315static void
3316sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3317{
3318	int i;
3319
3320	if (number_entries == 0) {
3321		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3322			stcb->asoc.strmout[i].next_sequence_sent = 0;
3323		}
3324	} else if (number_entries) {
3325		for (i = 0; i < number_entries; i++) {
3326			uint16_t temp;
3327
3328			temp = ntohs(list[i]);
3329			if (temp >= stcb->asoc.streamoutcnt) {
3330				/* no such stream */
3331				continue;
3332			}
3333			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3334		}
3335	}
3336	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3337}
3338
3339
3340struct sctp_stream_reset_out_request *
3341sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3342{
3343	struct sctp_association *asoc;
3344	struct sctp_stream_reset_out_req *req;
3345	struct sctp_stream_reset_out_request *r;
3346	struct sctp_tmit_chunk *chk;
3347	int len, clen;
3348
3349	asoc = &stcb->asoc;
3350	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3351		asoc->stream_reset_outstanding = 0;
3352		return (NULL);
3353	}
3354	if (stcb->asoc.str_reset == NULL) {
3355		asoc->stream_reset_outstanding = 0;
3356		return (NULL);
3357	}
3358	chk = stcb->asoc.str_reset;
3359	if (chk->data == NULL) {
3360		return (NULL);
3361	}
3362	if (bchk) {
3363		/* he wants a copy of the chk pointer */
3364		*bchk = chk;
3365	}
3366	clen = chk->send_size;
3367	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
3368	r = &req->sr_req;
3369	if (ntohl(r->request_seq) == seq) {
3370		/* found it */
3371		return (r);
3372	}
3373	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3374	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3375		/* move to the next one, there can only be a max of two */
3376		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3377		if (ntohl(r->request_seq) == seq) {
3378			return (r);
3379		}
3380	}
3381	/* that seq is not here */
3382	return (NULL);
3383}
3384
3385static void
3386sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3387{
3388	struct sctp_association *asoc;
3389	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3390
3391	if (stcb->asoc.str_reset == NULL) {
3392		return;
3393	}
3394	asoc = &stcb->asoc;
3395
3396	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3397	TAILQ_REMOVE(&asoc->control_send_queue,
3398	    chk,
3399	    sctp_next);
3400	if (chk->data) {
3401		sctp_m_freem(chk->data);
3402		chk->data = NULL;
3403	}
3404	asoc->ctrl_queue_cnt--;
3405	sctp_free_a_chunk(stcb, chk);
3406	/* sa_ignore NO_NULL_CHK */
3407	stcb->asoc.str_reset = NULL;
3408}
3409
3410
/*
 * Process a stream-reset response from the peer.
 *
 * Matches the response's sequence number against our outstanding
 * request (found via sctp_find_stream_reset) and, per request type,
 * applies the result: out-reset, in-reset, add-streams, or a full TSN
 * reset (which adopts new TSNs on both sides and clears the mapping
 * arrays).  Failed requests are reported to the ULP.
 *
 * Returns 1 only when the embedded forward-TSN processing aborts the
 * association; 0 otherwise (including duplicates, which are ignored).
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_STREAMS) {
				/* Ok we now may have more streams */
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* Put the new streams into effect */
					stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize;
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * Build a synthetic FORWARD-TSN up to
					 * one before the peer's next TSN to
					 * flush everything still pending.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* adopt the peer's new incoming TSN and clear both maps */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					/* adopt the receiver-assigned outgoing TSN */
					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}
3516
/*
 * Handle a peer's "reset my incoming streams" request, i.e. a request
 * that WE reset our outgoing streams toward the peer.
 *
 * If the request sequence matches the expected str_reset_seq_in we
 * queue an out-reset of our own (unless one is already outstanding or
 * the request was truncated, in which case we answer DENIED/TRY_LATER).
 * Requests one or two behind the window are re-answered with the cached
 * result; anything else gets BAD_SEQNO.  Results are appended to chk.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		if (trunc) {
			/* Can't do it, since they exceeded our buffer size  */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/* convert the stream list to host byte order in place */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			/* move the reset action back one */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		}
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: re-answer with the cached result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}
3572
/*
 * Handle a peer's TSN-reset request: reset all incoming and outgoing
 * stream sequence numbers and resynchronize the TSN space.
 *
 * Returns 1 if processing the synthesized FORWARD-TSN aborted the
 * association (caller must stop touching stcb), 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * In-sequence request: first deliver everything up to the
		 * current highest TSN by feeding ourselves a synthetic
		 * FORWARD-TSN chunk.
		 */
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			/* association was torn down while processing */
			return (1);
		}
		/* Jump the TSN space forward and rebase the mapping arrays. */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		/* Reply with our new sending seq and base TSN. */
		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		/* Reset every stream's sequence numbers, both directions. */
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission of the previous request: echo cached result. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Two behind: echo the result cached before that one. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}
3641
/*
 * Handle a peer's OUT-request: the peer is resetting its outgoing
 * streams, which are our incoming ones. If every TSN at or before
 * send_reset_at_tsn has arrived we reset immediately; otherwise the
 * request is queued on asoc->resetHead and triggered later by the
 * data-arrival path. The result is appended to the reply chunk 'chk'.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (trunc) {
			/* request did not fit in our buffer; deny it */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
		} else if ((tsn == asoc->cumulative_tsn) ||
		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			/* copy of the request plus its variable stream list */
			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->req, req,
			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			/* report PERFORMED now; the actual reset is deferred */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		}
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}
3719
3720static void
3721sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3722    struct sctp_stream_reset_add_strm *str_add)
3723{
3724	/*
3725	 * Peer is requesting to add more streams. If its within our
3726	 * max-streams we will allow it.
3727	 */
3728	uint16_t num_stream, i;
3729	uint32_t seq;
3730	struct sctp_association *asoc = &stcb->asoc;
3731	struct sctp_queued_to_read *ctl, *nctl;
3732
3733	/* Get the number. */
3734	seq = ntohl(str_add->request_seq);
3735	num_stream = ntohs(str_add->number_of_streams);
3736	/* Now what would be the new total? */
3737	if (asoc->str_reset_seq_in == seq) {
3738		num_stream += stcb->asoc.streamincnt;
3739		if (num_stream > stcb->asoc.max_inbound_streams) {
3740			/* We must reject it they ask for to many */
3741	denied:
3742			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3743			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3744			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3745		} else {
3746			/* Ok, we can do that :-) */
3747			struct sctp_stream_in *oldstrm;
3748
3749			/* save off the old */
3750			oldstrm = stcb->asoc.strmin;
3751			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
3752			    (num_stream * sizeof(struct sctp_stream_in)),
3753			    SCTP_M_STRMI);
3754			if (stcb->asoc.strmin == NULL) {
3755				stcb->asoc.strmin = oldstrm;
3756				goto denied;
3757			}
3758			/* copy off the old data */
3759			for (i = 0; i < stcb->asoc.streamincnt; i++) {
3760				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3761				stcb->asoc.strmin[i].stream_no = i;
3762				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
3763				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
3764				/* now anything on those queues? */
3765				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
3766					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
3767					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
3768				}
3769			}
3770			/* Init the new streams */
3771			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
3772				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3773				stcb->asoc.strmin[i].stream_no = i;
3774				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3775				stcb->asoc.strmin[i].delivery_started = 0;
3776			}
3777			SCTP_FREE(oldstrm, SCTP_M_STRMI);
3778			/* update the size */
3779			stcb->asoc.streamincnt = num_stream;
3780			/* Send the ack */
3781			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3782			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3783			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3784			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb,
3785			    (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED);
3786		}
3787	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3788		/*
3789		 * one seq back, just echo back last action since my
3790		 * response was lost.
3791		 */
3792		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3793	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3794		/*
3795		 * two seq back, just echo back last action since my
3796		 * response was lost.
3797		 */
3798		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3799	} else {
3800		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3801
3802	}
3803}
3804
3805#ifdef __GNUC__
3806__attribute__((noinline))
3807#endif
3808	static int
3809	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
3810        struct sctp_stream_reset_out_req *sr_req)
3811{
3812	int chk_length, param_len, ptype;
3813	struct sctp_paramhdr pstore;
3814	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
3815
3816	uint32_t seq;
3817	int num_req = 0;
3818	int trunc = 0;
3819	struct sctp_tmit_chunk *chk;
3820	struct sctp_chunkhdr *ch;
3821	struct sctp_paramhdr *ph;
3822	int ret_code = 0;
3823	int num_param = 0;
3824
3825	/* now it may be a reset or a reset-response */
3826	chk_length = ntohs(sr_req->ch.chunk_length);
3827
3828	/* setup for adding the response */
3829	sctp_alloc_a_chunk(stcb, chk);
3830	if (chk == NULL) {
3831		return (ret_code);
3832	}
3833	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3834	chk->rec.chunk_id.can_take_data = 0;
3835	chk->asoc = &stcb->asoc;
3836	chk->no_fr_allowed = 0;
3837	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3838	chk->book_size_scale = 0;
3839	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3840	if (chk->data == NULL) {
3841strres_nochunk:
3842		if (chk->data) {
3843			sctp_m_freem(chk->data);
3844			chk->data = NULL;
3845		}
3846		sctp_free_a_chunk(stcb, chk);
3847		return (ret_code);
3848	}
3849	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3850
3851	/* setup chunk parameters */
3852	chk->sent = SCTP_DATAGRAM_UNSENT;
3853	chk->snd_count = 0;
3854	chk->whoTo = stcb->asoc.primary_destination;
3855	atomic_add_int(&chk->whoTo->ref_count, 1);
3856
3857	ch = mtod(chk->data, struct sctp_chunkhdr *);
3858	ch->chunk_type = SCTP_STREAM_RESET;
3859	ch->chunk_flags = 0;
3860	ch->chunk_length = htons(chk->send_size);
3861	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3862	offset += sizeof(struct sctp_chunkhdr);
3863	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3864		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
3865		if (ph == NULL)
3866			break;
3867		param_len = ntohs(ph->param_length);
3868		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3869			/* bad param */
3870			break;
3871		}
3872		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
3873		    (uint8_t *) & cstore);
3874		ptype = ntohs(ph->param_type);
3875		num_param++;
3876		if (param_len > (int)sizeof(cstore)) {
3877			trunc = 1;
3878		} else {
3879			trunc = 0;
3880		}
3881
3882		if (num_param > SCTP_MAX_RESET_PARAMS) {
3883			/* hit the max of parameters already sorry.. */
3884			break;
3885		}
3886		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3887			struct sctp_stream_reset_out_request *req_out;
3888
3889			req_out = (struct sctp_stream_reset_out_request *)ph;
3890			num_req++;
3891			if (stcb->asoc.stream_reset_outstanding) {
3892				seq = ntohl(req_out->response_seq);
3893				if (seq == stcb->asoc.str_reset_seq_out) {
3894					/* implicit ack */
3895					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3896				}
3897			}
3898			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
3899		} else if (ptype == SCTP_STR_RESET_ADD_STREAMS) {
3900			struct sctp_stream_reset_add_strm *str_add;
3901
3902			str_add = (struct sctp_stream_reset_add_strm *)ph;
3903			num_req++;
3904			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
3905		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3906			struct sctp_stream_reset_in_request *req_in;
3907
3908			num_req++;
3909
3910			req_in = (struct sctp_stream_reset_in_request *)ph;
3911
3912			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
3913		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3914			struct sctp_stream_reset_tsn_request *req_tsn;
3915
3916			num_req++;
3917			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3918
3919			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3920				ret_code = 1;
3921				goto strres_nochunk;
3922			}
3923			/* no more */
3924			break;
3925		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3926			struct sctp_stream_reset_response *resp;
3927			uint32_t result;
3928
3929			resp = (struct sctp_stream_reset_response *)ph;
3930			seq = ntohl(resp->response_seq);
3931			result = ntohl(resp->result);
3932			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3933				ret_code = 1;
3934				goto strres_nochunk;
3935			}
3936		} else {
3937			break;
3938		}
3939		offset += SCTP_SIZE32(param_len);
3940		chk_length -= SCTP_SIZE32(param_len);
3941	}
3942	if (num_req == 0) {
3943		/* we have no response free the stuff */
3944		goto strres_nochunk;
3945	}
3946	/* ok we have a chunk to link in */
3947	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3948	    chk,
3949	    sctp_next);
3950	stcb->asoc.ctrl_queue_cnt++;
3951	return (ret_code);
3952}
3953
3954/*
3955 * Handle a router or endpoints report of a packet loss, there are two ways
3956 * to handle this, either we get the whole packet and must disect it
3957 * ourselves (possibly with truncation and or corruption) or it is a summary
3958 * from a middle box that did the disectting for us.
3959 */
3960static void
3961sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3962    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3963{
3964	uint32_t bottle_bw, on_queue;
3965	uint16_t trunc_len;
3966	unsigned int chlen;
3967	unsigned int at;
3968	struct sctp_chunk_desc desc;
3969	struct sctp_chunkhdr *ch;
3970
3971	chlen = ntohs(cp->ch.chunk_length);
3972	chlen -= sizeof(struct sctp_pktdrop_chunk);
3973	/* XXX possible chlen underflow */
3974	if (chlen == 0) {
3975		ch = NULL;
3976		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3977			SCTP_STAT_INCR(sctps_pdrpbwrpt);
3978	} else {
3979		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3980		chlen -= sizeof(struct sctphdr);
3981		/* XXX possible chlen underflow */
3982		memset(&desc, 0, sizeof(desc));
3983	}
3984	trunc_len = (uint16_t) ntohs(cp->trunc_len);
3985	if (trunc_len > limit) {
3986		trunc_len = limit;
3987	}
3988	/* now the chunks themselves */
3989	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3990		desc.chunk_type = ch->chunk_type;
3991		/* get amount we need to move */
3992		at = ntohs(ch->chunk_length);
3993		if (at < sizeof(struct sctp_chunkhdr)) {
3994			/* corrupt chunk, maybe at the end? */
3995			SCTP_STAT_INCR(sctps_pdrpcrupt);
3996			break;
3997		}
3998		if (trunc_len == 0) {
3999			/* we are supposed to have all of it */
4000			if (at > chlen) {
4001				/* corrupt skip it */
4002				SCTP_STAT_INCR(sctps_pdrpcrupt);
4003				break;
4004			}
4005		} else {
4006			/* is there enough of it left ? */
4007			if (desc.chunk_type == SCTP_DATA) {
4008				if (chlen < (sizeof(struct sctp_data_chunk) +
4009				    sizeof(desc.data_bytes))) {
4010					break;
4011				}
4012			} else {
4013				if (chlen < sizeof(struct sctp_chunkhdr)) {
4014					break;
4015				}
4016			}
4017		}
4018		if (desc.chunk_type == SCTP_DATA) {
4019			/* can we get out the tsn? */
4020			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4021				SCTP_STAT_INCR(sctps_pdrpmbda);
4022
4023			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4024				/* yep */
4025				struct sctp_data_chunk *dcp;
4026				uint8_t *ddp;
4027				unsigned int iii;
4028
4029				dcp = (struct sctp_data_chunk *)ch;
4030				ddp = (uint8_t *) (dcp + 1);
4031				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4032					desc.data_bytes[iii] = ddp[iii];
4033				}
4034				desc.tsn_ifany = dcp->dp.tsn;
4035			} else {
4036				/* nope we are done. */
4037				SCTP_STAT_INCR(sctps_pdrpnedat);
4038				break;
4039			}
4040		} else {
4041			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4042				SCTP_STAT_INCR(sctps_pdrpmbct);
4043		}
4044
4045		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4046			SCTP_STAT_INCR(sctps_pdrppdbrk);
4047			break;
4048		}
4049		if (SCTP_SIZE32(at) > chlen) {
4050			break;
4051		}
4052		chlen -= SCTP_SIZE32(at);
4053		if (chlen < sizeof(struct sctp_chunkhdr)) {
4054			/* done, none left */
4055			break;
4056		}
4057		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4058	}
4059	/* Now update any rwnd --- possibly */
4060	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4061		/* From a peer, we get a rwnd report */
4062		uint32_t a_rwnd;
4063
4064		SCTP_STAT_INCR(sctps_pdrpfehos);
4065
4066		bottle_bw = ntohl(cp->bottle_bw);
4067		on_queue = ntohl(cp->current_onq);
4068		if (bottle_bw && on_queue) {
4069			/* a rwnd report is in here */
4070			if (bottle_bw > on_queue)
4071				a_rwnd = bottle_bw - on_queue;
4072			else
4073				a_rwnd = 0;
4074
4075			if (a_rwnd == 0)
4076				stcb->asoc.peers_rwnd = 0;
4077			else {
4078				if (a_rwnd > stcb->asoc.total_flight) {
4079					stcb->asoc.peers_rwnd =
4080					    a_rwnd - stcb->asoc.total_flight;
4081				} else {
4082					stcb->asoc.peers_rwnd = 0;
4083				}
4084				if (stcb->asoc.peers_rwnd <
4085				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4086					/* SWS sender side engages */
4087					stcb->asoc.peers_rwnd = 0;
4088				}
4089			}
4090		}
4091	} else {
4092		SCTP_STAT_INCR(sctps_pdrpfmbox);
4093	}
4094
4095	/* now middle boxes in sat networks get a cwnd bump */
4096	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4097	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4098	    (stcb->asoc.sat_network)) {
4099		/*
4100		 * This is debateable but for sat networks it makes sense
4101		 * Note if a T3 timer has went off, we will prohibit any
4102		 * changes to cwnd until we exit the t3 loss recovery.
4103		 */
4104		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4105		    net, cp, &bottle_bw, &on_queue);
4106	}
4107}
4108
4109/*
4110 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4111 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4112 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4113 * length of the complete packet outputs: - length: modified to remaining
4114 * length after control processing - netp: modified to new sctp_nets after
4115 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4116 * bad packet,...) otherwise return the tcb for this packet
4117 */
4118#ifdef __GNUC__
4119__attribute__((noinline))
4120#endif
4121	static struct sctp_tcb *
4122	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4123             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4124             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4125             uint32_t vrf_id, uint16_t port)
4126{
4127	struct sctp_association *asoc;
4128	uint32_t vtag_in;
4129	int num_chunks = 0;	/* number of control chunks processed */
4130	uint32_t chk_length;
4131	int ret;
4132	int abort_no_unlock = 0;
4133
4134	/*
4135	 * How big should this be, and should it be alloc'd? Lets try the
4136	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4137	 * until we get into jumbo grams and such..
4138	 */
4139	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4140	struct sctp_tcb *locked_tcb = stcb;
4141	int got_auth = 0;
4142	uint32_t auth_offset = 0, auth_len = 0;
4143	int auth_skipped = 0;
4144	int asconf_cnt = 0;
4145
4146#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4147	struct socket *so;
4148
4149#endif
4150
4151	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4152	    iphlen, *offset, length, stcb);
4153
4154	/* validate chunk header length... */
4155	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4156		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4157		    ntohs(ch->chunk_length));
4158		if (locked_tcb) {
4159			SCTP_TCB_UNLOCK(locked_tcb);
4160		}
4161		return (NULL);
4162	}
4163	/*
4164	 * validate the verification tag
4165	 */
4166	vtag_in = ntohl(sh->v_tag);
4167
4168	if (locked_tcb) {
4169		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4170	}
4171	if (ch->chunk_type == SCTP_INITIATION) {
4172		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4173		    ntohs(ch->chunk_length), vtag_in);
4174		if (vtag_in != 0) {
4175			/* protocol error- silently discard... */
4176			SCTP_STAT_INCR(sctps_badvtag);
4177			if (locked_tcb) {
4178				SCTP_TCB_UNLOCK(locked_tcb);
4179			}
4180			return (NULL);
4181		}
4182	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4183		/*
4184		 * If there is no stcb, skip the AUTH chunk and process
4185		 * later after a stcb is found (to validate the lookup was
4186		 * valid.
4187		 */
4188		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4189		    (stcb == NULL) &&
4190		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4191			/* save this chunk for later processing */
4192			auth_skipped = 1;
4193			auth_offset = *offset;
4194			auth_len = ntohs(ch->chunk_length);
4195
4196			/* (temporarily) move past this chunk */
4197			*offset += SCTP_SIZE32(auth_len);
4198			if (*offset >= length) {
4199				/* no more data left in the mbuf chain */
4200				*offset = length;
4201				if (locked_tcb) {
4202					SCTP_TCB_UNLOCK(locked_tcb);
4203				}
4204				return (NULL);
4205			}
4206			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4207			    sizeof(struct sctp_chunkhdr), chunk_buf);
4208		}
4209		if (ch == NULL) {
4210			/* Help */
4211			*offset = length;
4212			if (locked_tcb) {
4213				SCTP_TCB_UNLOCK(locked_tcb);
4214			}
4215			return (NULL);
4216		}
4217		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4218			goto process_control_chunks;
4219		}
4220		/*
4221		 * first check if it's an ASCONF with an unknown src addr we
4222		 * need to look inside to find the association
4223		 */
4224		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4225			struct sctp_chunkhdr *asconf_ch = ch;
4226			uint32_t asconf_offset = 0, asconf_len = 0;
4227
4228			/* inp's refcount may be reduced */
4229			SCTP_INP_INCR_REF(inp);
4230
4231			asconf_offset = *offset;
4232			do {
4233				asconf_len = ntohs(asconf_ch->chunk_length);
4234				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4235					break;
4236				stcb = sctp_findassociation_ep_asconf(m, iphlen,
4237				    *offset, sh, &inp, netp, vrf_id);
4238				if (stcb != NULL)
4239					break;
4240				asconf_offset += SCTP_SIZE32(asconf_len);
4241				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4242				    sizeof(struct sctp_chunkhdr), chunk_buf);
4243			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4244			if (stcb == NULL) {
4245				/*
4246				 * reduce inp's refcount if not reduced in
4247				 * sctp_findassociation_ep_asconf().
4248				 */
4249				SCTP_INP_DECR_REF(inp);
4250			} else {
4251				locked_tcb = stcb;
4252			}
4253
4254			/* now go back and verify any auth chunk to be sure */
4255			if (auth_skipped && (stcb != NULL)) {
4256				struct sctp_auth_chunk *auth;
4257
4258				auth = (struct sctp_auth_chunk *)
4259				    sctp_m_getptr(m, auth_offset,
4260				    auth_len, chunk_buf);
4261				got_auth = 1;
4262				auth_skipped = 0;
4263				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4264				    auth_offset)) {
4265					/* auth HMAC failed so dump it */
4266					*offset = length;
4267					if (locked_tcb) {
4268						SCTP_TCB_UNLOCK(locked_tcb);
4269					}
4270					return (NULL);
4271				} else {
4272					/* remaining chunks are HMAC checked */
4273					stcb->asoc.authenticated = 1;
4274				}
4275			}
4276		}
4277		if (stcb == NULL) {
4278			/* no association, so it's out of the blue... */
4279			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
4280			    vrf_id, port);
4281			*offset = length;
4282			if (locked_tcb) {
4283				SCTP_TCB_UNLOCK(locked_tcb);
4284			}
4285			return (NULL);
4286		}
4287		asoc = &stcb->asoc;
4288		/* ABORT and SHUTDOWN can use either v_tag... */
4289		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4290		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4291		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4292			if ((vtag_in == asoc->my_vtag) ||
4293			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4294			    (vtag_in == asoc->peer_vtag))) {
4295				/* this is valid */
4296			} else {
4297				/* drop this packet... */
4298				SCTP_STAT_INCR(sctps_badvtag);
4299				if (locked_tcb) {
4300					SCTP_TCB_UNLOCK(locked_tcb);
4301				}
4302				return (NULL);
4303			}
4304		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4305			if (vtag_in != asoc->my_vtag) {
4306				/*
4307				 * this could be a stale SHUTDOWN-ACK or the
4308				 * peer never got the SHUTDOWN-COMPLETE and
4309				 * is still hung; we have started a new asoc
4310				 * but it won't complete until the shutdown
4311				 * is completed
4312				 */
4313				if (locked_tcb) {
4314					SCTP_TCB_UNLOCK(locked_tcb);
4315				}
4316				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
4317				    NULL, vrf_id, port);
4318				return (NULL);
4319			}
4320		} else {
4321			/* for all other chunks, vtag must match */
4322			if (vtag_in != asoc->my_vtag) {
4323				/* invalid vtag... */
4324				SCTPDBG(SCTP_DEBUG_INPUT3,
4325				    "invalid vtag: %xh, expect %xh\n",
4326				    vtag_in, asoc->my_vtag);
4327				SCTP_STAT_INCR(sctps_badvtag);
4328				if (locked_tcb) {
4329					SCTP_TCB_UNLOCK(locked_tcb);
4330				}
4331				*offset = length;
4332				return (NULL);
4333			}
4334		}
4335	}			/* end if !SCTP_COOKIE_ECHO */
4336	/*
4337	 * process all control chunks...
4338	 */
4339	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4340	/* EY */
4341	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4342	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4343	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4344		/* implied cookie-ack.. we must have lost the ack */
4345		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4346			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4347			    stcb->asoc.overall_error_count,
4348			    0,
4349			    SCTP_FROM_SCTP_INPUT,
4350			    __LINE__);
4351		}
4352		stcb->asoc.overall_error_count = 0;
4353		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4354		    *netp);
4355	}
4356process_control_chunks:
4357	while (IS_SCTP_CONTROL(ch)) {
4358		/* validate chunk length */
4359		chk_length = ntohs(ch->chunk_length);
4360		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4361		    ch->chunk_type, chk_length);
4362		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4363		if (chk_length < sizeof(*ch) ||
4364		    (*offset + (int)chk_length) > length) {
4365			*offset = length;
4366			if (locked_tcb) {
4367				SCTP_TCB_UNLOCK(locked_tcb);
4368			}
4369			return (NULL);
4370		}
4371		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4372		/*
4373		 * INIT-ACK only gets the init ack "header" portion only
4374		 * because we don't have to process the peer's COOKIE. All
4375		 * others get a complete chunk.
4376		 */
4377		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4378		    (ch->chunk_type == SCTP_INITIATION)) {
4379			/* get an init-ack chunk */
4380			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4381			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4382			if (ch == NULL) {
4383				*offset = length;
4384				if (locked_tcb) {
4385					SCTP_TCB_UNLOCK(locked_tcb);
4386				}
4387				return (NULL);
4388			}
4389		} else {
4390			/* For cookies and all other chunks. */
4391			if (chk_length > sizeof(chunk_buf)) {
4392				/*
4393				 * use just the size of the chunk buffer so
4394				 * the front part of our chunks fit in
4395				 * contiguous space up to the chunk buffer
4396				 * size (508 bytes). For chunks that need to
4397				 * get more than that they must use the
4398				 * sctp_m_getptr() function or other means
4399				 * (e.g. know how to parse mbuf chains).
4400				 * Cookies do this already.
4401				 */
4402				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4403				    (sizeof(chunk_buf) - 4),
4404				    chunk_buf);
4405				if (ch == NULL) {
4406					*offset = length;
4407					if (locked_tcb) {
4408						SCTP_TCB_UNLOCK(locked_tcb);
4409					}
4410					return (NULL);
4411				}
4412			} else {
4413				/* We can fit it all */
4414				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4415				    chk_length, chunk_buf);
4416				if (ch == NULL) {
4417					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4418					*offset = length;
4419					if (locked_tcb) {
4420						SCTP_TCB_UNLOCK(locked_tcb);
4421					}
4422					return (NULL);
4423				}
4424			}
4425		}
4426		num_chunks++;
4427		/* Save off the last place we got a control from */
4428		if (stcb != NULL) {
4429			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4430				/*
4431				 * allow last_control to be NULL if
4432				 * ASCONF... ASCONF processing will find the
4433				 * right net later
4434				 */
4435				if ((netp != NULL) && (*netp != NULL))
4436					stcb->asoc.last_control_chunk_from = *netp;
4437			}
4438		}
4439#ifdef SCTP_AUDITING_ENABLED
4440		sctp_audit_log(0xB0, ch->chunk_type);
4441#endif
4442
4443		/* check to see if this chunk required auth, but isn't */
4444		if ((stcb != NULL) &&
4445		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4446		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4447		    !stcb->asoc.authenticated) {
4448			/* "silently" ignore */
4449			SCTP_STAT_INCR(sctps_recvauthmissing);
4450			goto next_chunk;
4451		}
4452		switch (ch->chunk_type) {
4453		case SCTP_INITIATION:
4454			/* must be first and only chunk */
4455			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4456			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4457				/* We are not interested anymore? */
4458				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4459					/*
4460					 * collision case where we are
4461					 * sending to them too
4462					 */
4463					;
4464				} else {
4465					if (locked_tcb) {
4466						SCTP_TCB_UNLOCK(locked_tcb);
4467					}
4468					*offset = length;
4469					return (NULL);
4470				}
4471			}
4472			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4473			    (num_chunks > 1) ||
4474			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4475				*offset = length;
4476				if (locked_tcb) {
4477					SCTP_TCB_UNLOCK(locked_tcb);
4478				}
4479				return (NULL);
4480			}
4481			if ((stcb != NULL) &&
4482			    (SCTP_GET_STATE(&stcb->asoc) ==
4483			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4484				sctp_send_shutdown_ack(stcb,
4485				    stcb->asoc.primary_destination);
4486				*offset = length;
4487				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4488				if (locked_tcb) {
4489					SCTP_TCB_UNLOCK(locked_tcb);
4490				}
4491				return (NULL);
4492			}
4493			if (netp) {
4494				sctp_handle_init(m, iphlen, *offset, sh,
4495				    (struct sctp_init_chunk *)ch, inp,
4496				    stcb, *netp, &abort_no_unlock, vrf_id, port);
4497			}
4498			if (abort_no_unlock)
4499				return (NULL);
4500
4501			*offset = length;
4502			if (locked_tcb) {
4503				SCTP_TCB_UNLOCK(locked_tcb);
4504			}
4505			return (NULL);
4506			break;
4507		case SCTP_PAD_CHUNK:
4508			break;
4509		case SCTP_INITIATION_ACK:
4510			/* must be first and only chunk */
4511			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4512			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4513				/* We are not interested anymore */
4514				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4515					;
4516				} else {
4517					if (locked_tcb != stcb) {
4518						/* Very unlikely */
4519						SCTP_TCB_UNLOCK(locked_tcb);
4520					}
4521					*offset = length;
4522					if (stcb) {
4523#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4524						so = SCTP_INP_SO(inp);
4525						atomic_add_int(&stcb->asoc.refcnt, 1);
4526						SCTP_TCB_UNLOCK(stcb);
4527						SCTP_SOCKET_LOCK(so, 1);
4528						SCTP_TCB_LOCK(stcb);
4529						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4530#endif
4531						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4532#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4533						SCTP_SOCKET_UNLOCK(so, 1);
4534#endif
4535					}
4536					return (NULL);
4537				}
4538			}
4539			if ((num_chunks > 1) ||
4540			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4541				*offset = length;
4542				if (locked_tcb) {
4543					SCTP_TCB_UNLOCK(locked_tcb);
4544				}
4545				return (NULL);
4546			}
4547			if ((netp) && (*netp)) {
4548				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4549				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4550			} else {
4551				ret = -1;
4552			}
4553			/*
4554			 * Special case, I must call the output routine to
4555			 * get the cookie echoed
4556			 */
4557			if (abort_no_unlock)
4558				return (NULL);
4559
4560			if ((stcb) && ret == 0)
4561				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4562			*offset = length;
4563			if (locked_tcb) {
4564				SCTP_TCB_UNLOCK(locked_tcb);
4565			}
4566			return (NULL);
4567			break;
4568		case SCTP_SELECTIVE_ACK:
4569			{
4570				struct sctp_sack_chunk *sack;
4571				int abort_now = 0;
4572				uint32_t a_rwnd, cum_ack;
4573				uint16_t num_seg, num_dup;
4574				uint8_t flags;
4575				int offset_seg, offset_dup;
4576				int nonce_sum_flag;
4577
4578				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4579				SCTP_STAT_INCR(sctps_recvsacks);
4580				if (stcb == NULL) {
4581					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
4582					break;
4583				}
4584				if (chk_length < sizeof(struct sctp_sack_chunk)) {
4585					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4586					break;
4587				}
4588				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4589					/*-
4590					 * If we have sent a shutdown-ack, we will pay no
4591					 * attention to a sack sent in to us since
4592					 * we don't care anymore.
4593					 */
4594					break;
4595				}
4596				sack = (struct sctp_sack_chunk *)ch;
4597				flags = ch->chunk_flags;
4598				nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4599				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4600				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4601				num_dup = ntohs(sack->sack.num_dup_tsns);
4602				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4603				if (sizeof(struct sctp_sack_chunk) +
4604				    num_seg * sizeof(struct sctp_gap_ack_block) +
4605				    num_dup * sizeof(uint32_t) != chk_length) {
4606					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4607					break;
4608				}
4609				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4610				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4611				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4612				    cum_ack, num_seg, a_rwnd);
4613				stcb->asoc.seen_a_sack_this_pkt = 1;
4614				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4615				    (num_seg == 0) &&
4616				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4617				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4618				    (stcb->asoc.saw_sack_with_frags == 0) &&
4619				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4620				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4621				    ) {
4622					/*
4623					 * We have a SIMPLE sack having no
4624					 * prior segments and data on sent
4625					 * queue to be acked.. Use the
4626					 * faster path sack processing. We
4627					 * also allow window update sacks
4628					 * with no missing segments to go
4629					 * this way too.
4630					 */
4631					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4632					    &abort_now);
4633				} else {
4634					if (netp && *netp)
4635						sctp_handle_sack(m, offset_seg, offset_dup,
4636						    stcb, *netp,
4637						    num_seg, 0, num_dup, &abort_now, flags,
4638						    cum_ack, a_rwnd);
4639				}
4640				if (abort_now) {
4641					/* ABORT signal from sack processing */
4642					*offset = length;
4643					return (NULL);
4644				}
4645				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4646				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4647				    (stcb->asoc.stream_queue_cnt == 0)) {
4648					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4649				}
4650			}
4651			break;
4652			/*
4653			 * EY - nr_sack:  If the received chunk is an
4654			 * nr_sack chunk
4655			 */
4656		case SCTP_NR_SELECTIVE_ACK:
4657			{
4658				struct sctp_nr_sack_chunk *nr_sack;
4659				int abort_now = 0;
4660				uint32_t a_rwnd, cum_ack;
4661				uint16_t num_seg, num_nr_seg, num_dup;
4662				uint8_t flags;
4663				int offset_seg, offset_dup;
4664				int nonce_sum_flag;
4665
4666				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4667				SCTP_STAT_INCR(sctps_recvsacks);
4668				if (stcb == NULL) {
4669					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
4670					break;
4671				}
4672				if ((stcb->asoc.sctp_nr_sack_on_off == 0) ||
4673				    (stcb->asoc.peer_supports_nr_sack == 0)) {
4674					goto unknown_chunk;
4675				}
4676				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4677					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
4678					break;
4679				}
4680				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4681					/*-
4682					 * If we have sent a shutdown-ack, we will pay no
4683					 * attention to a sack sent in to us since
4684					 * we don't care anymore.
4685					 */
4686					break;
4687				}
4688				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4689				flags = ch->chunk_flags;
4690				nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4691
4692				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4693				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4694				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4695				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4696				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4697				if (sizeof(struct sctp_nr_sack_chunk) +
4698				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
4699				    num_dup * sizeof(uint32_t) != chk_length) {
4700					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
4701					break;
4702				}
4703				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
4704				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4705				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4706				    cum_ack, num_seg, a_rwnd);
4707				stcb->asoc.seen_a_sack_this_pkt = 1;
4708				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4709				    (num_seg == 0) && (num_nr_seg == 0) &&
4710				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4711				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4712				    (stcb->asoc.saw_sack_with_frags == 0) &&
4713				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4714				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
4715					/*
4716					 * We have a SIMPLE sack having no
4717					 * prior segments and data on sent
4718					 * queue to be acked. Use the faster
4719					 * path sack processing. We also
4720					 * allow window update sacks with no
4721					 * missing segments to go this way
4722					 * too.
4723					 */
4724					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4725					    &abort_now);
4726				} else {
4727					if (netp && *netp)
4728						sctp_handle_sack(m, offset_seg, offset_dup,
4729						    stcb, *netp,
4730						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
4731						    cum_ack, a_rwnd);
4732				}
4733				if (abort_now) {
4734					/* ABORT signal from sack processing */
4735					*offset = length;
4736					return (NULL);
4737				}
4738				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4739				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4740				    (stcb->asoc.stream_queue_cnt == 0)) {
4741					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4742				}
4743			}
4744			break;
4745
4746		case SCTP_HEARTBEAT_REQUEST:
4747			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4748			if ((stcb) && netp && *netp) {
4749				SCTP_STAT_INCR(sctps_recvheartbeat);
4750				sctp_send_heartbeat_ack(stcb, m, *offset,
4751				    chk_length, *netp);
4752
4753				/* He's alive so give him credit */
4754				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4755					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4756					    stcb->asoc.overall_error_count,
4757					    0,
4758					    SCTP_FROM_SCTP_INPUT,
4759					    __LINE__);
4760				}
4761				stcb->asoc.overall_error_count = 0;
4762			}
4763			break;
4764		case SCTP_HEARTBEAT_ACK:
4765			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4766			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4767				/* Its not ours */
4768				*offset = length;
4769				if (locked_tcb) {
4770					SCTP_TCB_UNLOCK(locked_tcb);
4771				}
4772				return (NULL);
4773			}
4774			/* He's alive so give him credit */
4775			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4776				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4777				    stcb->asoc.overall_error_count,
4778				    0,
4779				    SCTP_FROM_SCTP_INPUT,
4780				    __LINE__);
4781			}
4782			stcb->asoc.overall_error_count = 0;
4783			SCTP_STAT_INCR(sctps_recvheartbeatack);
4784			if (netp && *netp)
4785				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4786				    stcb, *netp);
4787			break;
4788		case SCTP_ABORT_ASSOCIATION:
4789			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4790			    stcb);
4791			if ((stcb) && netp && *netp)
4792				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4793				    stcb, *netp);
4794			*offset = length;
4795			return (NULL);
4796			break;
4797		case SCTP_SHUTDOWN:
4798			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4799			    stcb);
4800			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4801				*offset = length;
4802				if (locked_tcb) {
4803					SCTP_TCB_UNLOCK(locked_tcb);
4804				}
4805				return (NULL);
4806			}
4807			if (netp && *netp) {
4808				int abort_flag = 0;
4809
4810				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4811				    stcb, *netp, &abort_flag);
4812				if (abort_flag) {
4813					*offset = length;
4814					return (NULL);
4815				}
4816			}
4817			break;
4818		case SCTP_SHUTDOWN_ACK:
4819			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4820			if ((stcb) && (netp) && (*netp))
4821				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4822			*offset = length;
4823			return (NULL);
4824			break;
4825
4826		case SCTP_OPERATION_ERROR:
4827			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4828			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4829
4830				*offset = length;
4831				return (NULL);
4832			}
4833			break;
4834		case SCTP_COOKIE_ECHO:
4835			SCTPDBG(SCTP_DEBUG_INPUT3,
4836			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4837			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4838				;
4839			} else {
4840				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4841					/* We are not interested anymore */
4842			abend:
4843					if (stcb) {
4844						SCTP_TCB_UNLOCK(stcb);
4845					}
4846					*offset = length;
4847					return (NULL);
4848				}
4849			}
4850			/*
4851			 * First are we accepting? We do this again here
4852			 * since it is possible that a previous endpoint WAS
4853			 * listening responded to a INIT-ACK and then
4854			 * closed. We opened and bound.. and are now no
4855			 * longer listening.
4856			 */
4857
4858			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
4859				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4860				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4861					struct mbuf *oper;
4862					struct sctp_paramhdr *phdr;
4863
4864					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4865					    0, M_DONTWAIT, 1, MT_DATA);
4866					if (oper) {
4867						SCTP_BUF_LEN(oper) =
4868						    sizeof(struct sctp_paramhdr);
4869						phdr = mtod(oper,
4870						    struct sctp_paramhdr *);
4871						phdr->param_type =
4872						    htons(SCTP_CAUSE_OUT_OF_RESC);
4873						phdr->param_length =
4874						    htons(sizeof(struct sctp_paramhdr));
4875					}
4876					sctp_abort_association(inp, stcb, m,
4877					    iphlen, sh, oper, vrf_id, port);
4878				}
4879				*offset = length;
4880				return (NULL);
4881			} else {
4882				struct mbuf *ret_buf;
4883				struct sctp_inpcb *linp;
4884
4885				if (stcb) {
4886					linp = NULL;
4887				} else {
4888					linp = inp;
4889				}
4890
4891				if (linp) {
4892					SCTP_ASOC_CREATE_LOCK(linp);
4893					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4894					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4895						SCTP_ASOC_CREATE_UNLOCK(linp);
4896						goto abend;
4897					}
4898				}
4899				if (netp) {
4900					ret_buf =
4901					    sctp_handle_cookie_echo(m, iphlen,
4902					    *offset, sh,
4903					    (struct sctp_cookie_echo_chunk *)ch,
4904					    &inp, &stcb, netp,
4905					    auth_skipped,
4906					    auth_offset,
4907					    auth_len,
4908					    &locked_tcb,
4909					    vrf_id,
4910					    port);
4911				} else {
4912					ret_buf = NULL;
4913				}
4914				if (linp) {
4915					SCTP_ASOC_CREATE_UNLOCK(linp);
4916				}
4917				if (ret_buf == NULL) {
4918					if (locked_tcb) {
4919						SCTP_TCB_UNLOCK(locked_tcb);
4920					}
4921					SCTPDBG(SCTP_DEBUG_INPUT3,
4922					    "GAK, null buffer\n");
4923					auth_skipped = 0;
4924					*offset = length;
4925					return (NULL);
4926				}
4927				/* if AUTH skipped, see if it verified... */
4928				if (auth_skipped) {
4929					got_auth = 1;
4930					auth_skipped = 0;
4931				}
4932				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4933					/*
4934					 * Restart the timer if we have
4935					 * pending data
4936					 */
4937					struct sctp_tmit_chunk *chk;
4938
4939					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4940					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
4941				}
4942			}
4943			break;
4944		case SCTP_COOKIE_ACK:
4945			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4946			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4947				if (locked_tcb) {
4948					SCTP_TCB_UNLOCK(locked_tcb);
4949				}
4950				return (NULL);
4951			}
4952			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4953				/* We are not interested anymore */
4954				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4955					;
4956				} else if (stcb) {
4957#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4958					so = SCTP_INP_SO(inp);
4959					atomic_add_int(&stcb->asoc.refcnt, 1);
4960					SCTP_TCB_UNLOCK(stcb);
4961					SCTP_SOCKET_LOCK(so, 1);
4962					SCTP_TCB_LOCK(stcb);
4963					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4964#endif
4965					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4966#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4967					SCTP_SOCKET_UNLOCK(so, 1);
4968#endif
4969					*offset = length;
4970					return (NULL);
4971				}
4972			}
4973			/* He's alive so give him credit */
4974			if ((stcb) && netp && *netp) {
4975				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4976					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4977					    stcb->asoc.overall_error_count,
4978					    0,
4979					    SCTP_FROM_SCTP_INPUT,
4980					    __LINE__);
4981				}
4982				stcb->asoc.overall_error_count = 0;
4983				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4984			}
4985			break;
4986		case SCTP_ECN_ECHO:
4987			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4988			/* He's alive so give him credit */
4989			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4990				/* Its not ours */
4991				if (locked_tcb) {
4992					SCTP_TCB_UNLOCK(locked_tcb);
4993				}
4994				*offset = length;
4995				return (NULL);
4996			}
4997			if (stcb) {
4998				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4999					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5000					    stcb->asoc.overall_error_count,
5001					    0,
5002					    SCTP_FROM_SCTP_INPUT,
5003					    __LINE__);
5004				}
5005				stcb->asoc.overall_error_count = 0;
5006				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5007				    stcb);
5008			}
5009			break;
5010		case SCTP_ECN_CWR:
5011			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5012			/* He's alive so give him credit */
5013			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5014				/* Its not ours */
5015				if (locked_tcb) {
5016					SCTP_TCB_UNLOCK(locked_tcb);
5017				}
5018				*offset = length;
5019				return (NULL);
5020			}
5021			if (stcb) {
5022				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5023					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5024					    stcb->asoc.overall_error_count,
5025					    0,
5026					    SCTP_FROM_SCTP_INPUT,
5027					    __LINE__);
5028				}
5029				stcb->asoc.overall_error_count = 0;
5030				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
5031			}
5032			break;
5033		case SCTP_SHUTDOWN_COMPLETE:
5034			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
5035			/* must be first and only chunk */
5036			if ((num_chunks > 1) ||
5037			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5038				*offset = length;
5039				if (locked_tcb) {
5040					SCTP_TCB_UNLOCK(locked_tcb);
5041				}
5042				return (NULL);
5043			}
5044			if ((stcb) && netp && *netp) {
5045				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5046				    stcb, *netp);
5047			}
5048			*offset = length;
5049			return (NULL);
5050			break;
5051		case SCTP_ASCONF:
5052			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5053			/* He's alive so give him credit */
5054			if (stcb) {
5055				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5056					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5057					    stcb->asoc.overall_error_count,
5058					    0,
5059					    SCTP_FROM_SCTP_INPUT,
5060					    __LINE__);
5061				}
5062				stcb->asoc.overall_error_count = 0;
5063				sctp_handle_asconf(m, *offset,
5064				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5065				asconf_cnt++;
5066			}
5067			break;
5068		case SCTP_ASCONF_ACK:
5069			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5070			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5071				/* Its not ours */
5072				if (locked_tcb) {
5073					SCTP_TCB_UNLOCK(locked_tcb);
5074				}
5075				*offset = length;
5076				return (NULL);
5077			}
5078			if ((stcb) && netp && *netp) {
5079				/* He's alive so give him credit */
5080				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5081					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5082					    stcb->asoc.overall_error_count,
5083					    0,
5084					    SCTP_FROM_SCTP_INPUT,
5085					    __LINE__);
5086				}
5087				stcb->asoc.overall_error_count = 0;
5088				sctp_handle_asconf_ack(m, *offset,
5089				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5090				if (abort_no_unlock)
5091					return (NULL);
5092			}
5093			break;
5094		case SCTP_FORWARD_CUM_TSN:
5095			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5096			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5097				/* Its not ours */
5098				if (locked_tcb) {
5099					SCTP_TCB_UNLOCK(locked_tcb);
5100				}
5101				*offset = length;
5102				return (NULL);
5103			}
5104			/* He's alive so give him credit */
5105			if (stcb) {
5106				int abort_flag = 0;
5107
5108				stcb->asoc.overall_error_count = 0;
5109				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5110					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5111					    stcb->asoc.overall_error_count,
5112					    0,
5113					    SCTP_FROM_SCTP_INPUT,
5114					    __LINE__);
5115				}
5116				*fwd_tsn_seen = 1;
5117				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5118					/* We are not interested anymore */
5119#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5120					so = SCTP_INP_SO(inp);
5121					atomic_add_int(&stcb->asoc.refcnt, 1);
5122					SCTP_TCB_UNLOCK(stcb);
5123					SCTP_SOCKET_LOCK(so, 1);
5124					SCTP_TCB_LOCK(stcb);
5125					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5126#endif
5127					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5128#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5129					SCTP_SOCKET_UNLOCK(so, 1);
5130#endif
5131					*offset = length;
5132					return (NULL);
5133				}
5134				sctp_handle_forward_tsn(stcb,
5135				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5136				if (abort_flag) {
5137					*offset = length;
5138					return (NULL);
5139				} else {
5140					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5141						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5142						    stcb->asoc.overall_error_count,
5143						    0,
5144						    SCTP_FROM_SCTP_INPUT,
5145						    __LINE__);
5146					}
5147					stcb->asoc.overall_error_count = 0;
5148				}
5149
5150			}
5151			break;
5152		case SCTP_STREAM_RESET:
5153			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5154			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5155				/* Its not ours */
5156				if (locked_tcb) {
5157					SCTP_TCB_UNLOCK(locked_tcb);
5158				}
5159				*offset = length;
5160				return (NULL);
5161			}
5162			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5163				/* We are not interested anymore */
5164#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5165				so = SCTP_INP_SO(inp);
5166				atomic_add_int(&stcb->asoc.refcnt, 1);
5167				SCTP_TCB_UNLOCK(stcb);
5168				SCTP_SOCKET_LOCK(so, 1);
5169				SCTP_TCB_LOCK(stcb);
5170				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5171#endif
5172				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5173#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5174				SCTP_SOCKET_UNLOCK(so, 1);
5175#endif
5176				*offset = length;
5177				return (NULL);
5178			}
5179			if (stcb->asoc.peer_supports_strreset == 0) {
5180				/*
5181				 * hmm, peer should have announced this, but
5182				 * we will turn it on since he is sending us
5183				 * a stream reset.
5184				 */
5185				stcb->asoc.peer_supports_strreset = 1;
5186			}
5187			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
5188				/* stop processing */
5189				*offset = length;
5190				return (NULL);
5191			}
5192			break;
5193		case SCTP_PACKET_DROPPED:
5194			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5195			/* re-get it all please */
5196			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5197				/* Its not ours */
5198				if (locked_tcb) {
5199					SCTP_TCB_UNLOCK(locked_tcb);
5200				}
5201				*offset = length;
5202				return (NULL);
5203			}
5204			if (ch && (stcb) && netp && (*netp)) {
5205				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5206				    stcb, *netp,
5207				    min(chk_length, (sizeof(chunk_buf) - 4)));
5208
5209			}
5210			break;
5211
5212		case SCTP_AUTHENTICATION:
5213			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5214			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5215				goto unknown_chunk;
5216
5217			if (stcb == NULL) {
5218				/* save the first AUTH for later processing */
5219				if (auth_skipped == 0) {
5220					auth_offset = *offset;
5221					auth_len = chk_length;
5222					auth_skipped = 1;
5223				}
5224				/* skip this chunk (temporarily) */
5225				goto next_chunk;
5226			}
5227			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5228			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5229			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5230				/* Its not ours */
5231				if (locked_tcb) {
5232					SCTP_TCB_UNLOCK(locked_tcb);
5233				}
5234				*offset = length;
5235				return (NULL);
5236			}
5237			if (got_auth == 1) {
5238				/* skip this chunk... it's already auth'd */
5239				goto next_chunk;
5240			}
5241			got_auth = 1;
5242			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5243			    m, *offset)) {
5244				/* auth HMAC failed so dump the packet */
5245				*offset = length;
5246				return (stcb);
5247			} else {
5248				/* remaining chunks are HMAC checked */
5249				stcb->asoc.authenticated = 1;
5250			}
5251			break;
5252
5253		default:
5254	unknown_chunk:
5255			/* it's an unknown chunk! */
5256			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5257				struct mbuf *mm;
5258				struct sctp_paramhdr *phd;
5259
5260				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5261				    0, M_DONTWAIT, 1, MT_DATA);
5262				if (mm) {
5263					phd = mtod(mm, struct sctp_paramhdr *);
5264					/*
5265					 * We cheat and use param type since
5266					 * we did not bother to define a
5267					 * error cause struct. They are the
5268					 * same basic format with different
5269					 * names.
5270					 */
5271					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5272					phd->param_length = htons(chk_length + sizeof(*phd));
5273					SCTP_BUF_LEN(mm) = sizeof(*phd);
5274					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
5275					    M_DONTWAIT);
5276					if (SCTP_BUF_NEXT(mm)) {
5277#ifdef SCTP_MBUF_LOGGING
5278						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5279							struct mbuf *mat;
5280
5281							mat = SCTP_BUF_NEXT(mm);
5282							while (mat) {
5283								if (SCTP_BUF_IS_EXTENDED(mat)) {
5284									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5285								}
5286								mat = SCTP_BUF_NEXT(mat);
5287							}
5288						}
5289#endif
5290						sctp_queue_op_err(stcb, mm);
5291					} else {
5292						sctp_m_freem(mm);
5293					}
5294				}
5295			}
5296			if ((ch->chunk_type & 0x80) == 0) {
5297				/* discard this packet */
5298				*offset = length;
5299				return (stcb);
5300			}	/* else skip this bad chunk and continue... */
5301			break;
5302		}		/* switch (ch->chunk_type) */
5303
5304
5305next_chunk:
5306		/* get the next chunk */
5307		*offset += SCTP_SIZE32(chk_length);
5308		if (*offset >= length) {
5309			/* no more data left in the mbuf chain */
5310			break;
5311		}
5312		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5313		    sizeof(struct sctp_chunkhdr), chunk_buf);
5314		if (ch == NULL) {
5315			if (locked_tcb) {
5316				SCTP_TCB_UNLOCK(locked_tcb);
5317			}
5318			*offset = length;
5319			return (NULL);
5320		}
5321	}			/* while */
5322
5323	if (asconf_cnt > 0 && stcb != NULL) {
5324		sctp_send_asconf_ack(stcb);
5325	}
5326	return (stcb);
5327}
5328
5329
5330/*
5331 * Process the ECN bits we have something set so we must look to see if it is
5332 * ECN(0) or ECN(1) or CE
5333 */
5334static void
5335sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
5336    uint8_t ecn_bits)
5337{
5338	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5339		;
5340	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
5341		/*
5342		 * we only add to the nonce sum for ECT1, ECT0 does not
5343		 * change the NS bit (that we have yet to find a way to send
5344		 * it yet).
5345		 */
5346
5347		/* ECN Nonce stuff */
5348		stcb->asoc.receiver_nonce_sum++;
5349		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
5350
5351		/*
5352		 * Drag up the last_echo point if cumack is larger since we
5353		 * don't want the point falling way behind by more than
5354		 * 2^^31 and then having it be incorrect.
5355		 */
5356		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5357		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5358			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5359		}
5360	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
5361		/*
5362		 * Drag up the last_echo point if cumack is larger since we
5363		 * don't want the point falling way behind by more than
5364		 * 2^^31 and then having it be incorrect.
5365		 */
5366		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5367		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5368			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5369		}
5370	}
5371}
5372
5373static void
5374sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
5375    uint32_t high_tsn, uint8_t ecn_bits)
5376{
5377	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5378		/*
5379		 * we possibly must notify the sender that a congestion
5380		 * window reduction is in order. We do this by adding a ECNE
5381		 * chunk to the output chunk queue. The incoming CWR will
5382		 * remove this chunk.
5383		 */
5384		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
5385		    MAX_TSN)) {
5386			/* Yep, we need to add a ECNE */
5387			sctp_send_ecn_echo(stcb, net, high_tsn);
5388			stcb->asoc.last_echo_tsn = high_tsn;
5389		}
5390	}
5391}
5392
#ifdef INVARIANTS
#ifdef __GNUC__
__attribute__((noinline))
#endif
void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *tcb;

	/*
	 * Debug-only (INVARIANTS) sanity check run when leaving input
	 * processing: panic if this thread still owns any stcb lock on
	 * the endpoint, or the endpoint's create/inp locks.
	 */
	LIST_FOREACH(tcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&tcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
	if (mtx_owned(&inp->inp_create_mtx)) {
		panic("Own create lock on inp");
	}
	if (mtx_owned(&inp->inp_mtx)) {
		panic("Own inp lock on inp");
	}
}

#endif
5416
5417/*
5418 * common input chunk processing (v4 and v6)
5419 */
5420void
5421sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
5422    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
5423    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
5424    uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
5425{
5426	/*
5427	 * Control chunk processing
5428	 */
5429	uint32_t high_tsn;
5430	int fwd_tsn_seen = 0, data_processed = 0;
5431	struct mbuf *m = *mm;
5432	int abort_flag = 0;
5433	int un_sent;
5434
5435	SCTP_STAT_INCR(sctps_recvdatagrams);
5436#ifdef SCTP_AUDITING_ENABLED
5437	sctp_audit_log(0xE0, 1);
5438	sctp_auditing(0, inp, stcb, net);
5439#endif
5440
5441	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
5442	    m, iphlen, offset, length, stcb);
5443	if (stcb) {
5444		/* always clear this before beginning a packet */
5445		stcb->asoc.authenticated = 0;
5446		stcb->asoc.seen_a_sack_this_pkt = 0;
5447		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
5448		    stcb, stcb->asoc.state);
5449
5450		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
5451		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5452			/*-
5453			 * If we hit here, we had a ref count
5454			 * up when the assoc was aborted and the
5455			 * timer is clearing out the assoc, we should
5456			 * NOT respond to any packet.. its OOTB.
5457			 */
5458			SCTP_TCB_UNLOCK(stcb);
5459			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5460			    vrf_id, port);
5461			goto out_now;
5462		}
5463	}
5464	if (IS_SCTP_CONTROL(ch)) {
5465		/* process the control portion of the SCTP packet */
5466		/* sa_ignore NO_NULL_CHK */
5467		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
5468		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
5469		if (stcb) {
5470			/*
5471			 * This covers us if the cookie-echo was there and
5472			 * it changes our INP.
5473			 */
5474			inp = stcb->sctp_ep;
5475			if ((net) && (port)) {
5476				if (net->port == 0) {
5477					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
5478				}
5479				net->port = port;
5480			}
5481		}
5482	} else {
5483		/*
5484		 * no control chunks, so pre-process DATA chunks (these
5485		 * checks are taken care of by control processing)
5486		 */
5487
5488		/*
5489		 * if DATA only packet, and auth is required, then punt...
5490		 * can't have authenticated without any AUTH (control)
5491		 * chunks
5492		 */
5493		if ((stcb != NULL) &&
5494		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5495		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
5496			/* "silently" ignore */
5497			SCTP_STAT_INCR(sctps_recvauthmissing);
5498			SCTP_TCB_UNLOCK(stcb);
5499			goto out_now;
5500		}
5501		if (stcb == NULL) {
5502			/* out of the blue DATA chunk */
5503			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5504			    vrf_id, port);
5505			goto out_now;
5506		}
5507		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5508			/* v_tag mismatch! */
5509			SCTP_STAT_INCR(sctps_badvtag);
5510			SCTP_TCB_UNLOCK(stcb);
5511			goto out_now;
5512		}
5513	}
5514
5515	if (stcb == NULL) {
5516		/*
5517		 * no valid TCB for this packet, or we found it's a bad
5518		 * packet while processing control, or we're done with this
5519		 * packet (done or skip rest of data), so we drop it...
5520		 */
5521		goto out_now;
5522	}
5523	/*
5524	 * DATA chunk processing
5525	 */
5526	/* plow through the data chunks while length > offset */
5527
5528	/*
5529	 * Rest should be DATA only.  Check authentication state if AUTH for
5530	 * DATA is required.
5531	 */
5532	if ((length > offset) &&
5533	    (stcb != NULL) &&
5534	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
5535	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
5536	    !stcb->asoc.authenticated) {
5537		/* "silently" ignore */
5538		SCTP_STAT_INCR(sctps_recvauthmissing);
5539		SCTPDBG(SCTP_DEBUG_AUTH1,
5540		    "Data chunk requires AUTH, skipped\n");
5541		goto trigger_send;
5542	}
5543	if (length > offset) {
5544		int retval;
5545
5546		/*
5547		 * First check to make sure our state is correct. We would
5548		 * not get here unless we really did have a tag, so we don't
5549		 * abort if this happens, just dump the chunk silently.
5550		 */
5551		switch (SCTP_GET_STATE(&stcb->asoc)) {
5552		case SCTP_STATE_COOKIE_ECHOED:
5553			/*
5554			 * we consider data with valid tags in this state
5555			 * shows us the cookie-ack was lost. Imply it was
5556			 * there.
5557			 */
5558			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5559				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5560				    stcb->asoc.overall_error_count,
5561				    0,
5562				    SCTP_FROM_SCTP_INPUT,
5563				    __LINE__);
5564			}
5565			stcb->asoc.overall_error_count = 0;
5566			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5567			break;
5568		case SCTP_STATE_COOKIE_WAIT:
5569			/*
5570			 * We consider OOTB any data sent during asoc setup.
5571			 */
5572			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5573			    vrf_id, port);
5574			SCTP_TCB_UNLOCK(stcb);
5575			goto out_now;
5576			/* sa_ignore NOTREACHED */
5577			break;
5578		case SCTP_STATE_EMPTY:	/* should not happen */
5579		case SCTP_STATE_INUSE:	/* should not happen */
5580		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5581		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5582		default:
5583			SCTP_TCB_UNLOCK(stcb);
5584			goto out_now;
5585			/* sa_ignore NOTREACHED */
5586			break;
5587		case SCTP_STATE_OPEN:
5588		case SCTP_STATE_SHUTDOWN_SENT:
5589			break;
5590		}
5591		/* take care of ECN, part 1. */
5592		if (stcb->asoc.ecn_allowed &&
5593		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5594			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
5595		}
5596		/* plow through the data chunks while length > offset */
5597		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
5598		    inp, stcb, net, &high_tsn);
5599		if (retval == 2) {
5600			/*
5601			 * The association aborted, NO UNLOCK needed since
5602			 * the association is destroyed.
5603			 */
5604			goto out_now;
5605		}
5606		data_processed = 1;
5607		if (retval == 0) {
5608			/* take care of ecn part 2. */
5609			if (stcb->asoc.ecn_allowed &&
5610			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5611				sctp_process_ecn_marked_b(stcb, net, high_tsn,
5612				    ecn_bits);
5613			}
5614		}
5615		/*
5616		 * Anything important needs to have been m_copy'ed in
5617		 * process_data
5618		 */
5619	}
5620	if ((data_processed == 0) && (fwd_tsn_seen)) {
5621		int was_a_gap;
5622		uint32_t highest_tsn;
5623
5624		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map, MAX_TSN)) {
5625			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
5626		} else {
5627			highest_tsn = stcb->asoc.highest_tsn_inside_map;
5628		}
5629		was_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
5630		stcb->asoc.send_sack = 1;
5631		sctp_sack_check(stcb, was_a_gap, &abort_flag);
5632		if (abort_flag) {
5633			/* Again, we aborted so NO UNLOCK needed */
5634			goto out_now;
5635		}
5636	} else if (fwd_tsn_seen) {
5637		stcb->asoc.send_sack = 1;
5638	}
5639	/* trigger send of any chunks in queue... */
5640trigger_send:
5641#ifdef SCTP_AUDITING_ENABLED
5642	sctp_audit_log(0xE0, 2);
5643	sctp_auditing(1, inp, stcb, net);
5644#endif
5645	SCTPDBG(SCTP_DEBUG_INPUT1,
5646	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5647	    stcb->asoc.peers_rwnd,
5648	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5649	    stcb->asoc.total_flight);
5650	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5651
5652	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
5653	    ((un_sent) &&
5654	    (stcb->asoc.peers_rwnd > 0 ||
5655	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5656		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5657		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5658		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5659	}
5660#ifdef SCTP_AUDITING_ENABLED
5661	sctp_audit_log(0xE0, 3);
5662	sctp_auditing(2, inp, stcb, net);
5663#endif
5664	SCTP_TCB_UNLOCK(stcb);
5665out_now:
5666#ifdef INVARIANTS
5667	sctp_validate_no_locks(inp);
5668#endif
5669	return;
5670}
5671
#if 0
/*
 * Debugging aid, compiled out: walk an mbuf chain printing each mbuf's
 * length and, for external-storage mbufs, the size of the attached
 * buffer.  Flip the "#if 0" to enable during bring-up/debugging.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	for (; m; m = SCTP_BUF_NEXT(m)) {
		printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m));
		if (SCTP_BUF_IS_EXTENDED(m))
			printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m));
	}
}

#endif
5684
/*
 * IPv4 SCTP input path with optional UDP encapsulation.
 *
 * i_pak - received packet
 * off   - offset to the SCTP header (i.e. the IP header length)
 * port  - source UDP port when the packet arrived UDP-encapsulated,
 *         0 for plain SCTP
 *
 * Validates the headers and CRC32c checksum, locates the endpoint and
 * association, then hands off to sctp_common_input_processing().  The
 * packet is always consumed (freed) before returning.
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	/* set once sctp_findassociation_addr() raised inp's ref-count
	 * without returning a locked stcb; we must drop it on exit */
	int refcount_up = 0;
	int length, mlen, offset;

#if !defined(SCTP_WITH_NO_CSUM)
	uint32_t check, calc_check;

#endif

	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef  SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == 0) {
			/* m_pullup freed the chain on failure */
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	/* validate mbuf chain length with IP payload length */
	if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    m->m_pkthdr.csum_flags);
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		/* hardware already verified the CRC32c */
		SCTP_STAT_INCR(sctps_recvhwcrc);
		goto sctp_skip_csum_4;
	}
	check = sh->checksum;	/* save incoming checksum */
	sh->checksum = 0;	/* prepare for calc */
	calc_check = sctp_calculate_cksum(m, iphlen);
	sh->checksum = check;
	SCTP_STAT_INCR(sctps_recvswcrc);
	if (calc_check != check) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
		    calc_check, check, m, mlen, iphlen);

		/*
		 * Bad checksum: still look up the association so we can
		 * report a packet-drop to the peer before discarding.
		 */
		stcb = sctp_findassociation_addr(m, iphlen,
		    offset - sizeof(*ch),
		    sh, ch, &inp, &net,
		    vrf_id);
		if ((net) && (port)) {
			/* UDP-encapsulated peer: adjust MTU once, latch port */
			if (net->port == 0) {
				sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
			}
			net->port = port;
		}
		if ((inp) && (stcb)) {
			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
		} else if ((inp != NULL) && (stcb == NULL)) {
			refcount_up = 1;
		}
		SCTP_STAT_INCR(sctps_badsum);
		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
		goto bad;
	}
sctp_skip_csum_4:
#endif
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	if ((net) && (port)) {
		/* UDP-encapsulated peer: adjust MTU once, latch port */
		if (net->port == 0) {
			sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
		}
		net->port = port;
	}
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		/* No endpoint listens on this port: respond per RFC rules. */
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
		goto bad;
	} else if (stcb == NULL) {
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		MODULE_GLOBAL(ipsec4stat).in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	/*
	 * NOTE(review): assumes the IP stack already subtracted the header
	 * length from ip_len, so adding iphlen back yields the total packet
	 * length - confirm against ip_input() for this FreeBSD version.
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id, port);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
bad:
	/* error exit: release lock, ref-count, and the packet */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}

/*
 * Standard (non-UDP-encapsulated) SCTP input entry point: forwards to
 * sctp_input_with_port() with an encapsulation port of 0.
 *
 * Converted from the obsolescent K&R identifier-list definition to an
 * ANSI prototype-style definition (K&R definitions are removed in C23
 * and defeat compiler argument type checking).
 */
void
sctp_input(struct mbuf *i_pak, int off)
{
	sctp_input_with_port(i_pak, off, 0);
}
5932