sctp_input.c revision 196260
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 196260 2009-08-15 21:10:52Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_crc32.h>
50#include <netinet/udp.h>
51
52
53
54static void
55sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
56{
57	struct sctp_nets *net;
58
59	/*
60	 * This now not only stops all cookie timers it also stops any INIT
61	 * timers as well. This will make sure that the timers are stopped
62	 * in all collision cases.
63	 */
64	SCTP_TCB_LOCK_ASSERT(stcb);
65	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
66		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
67			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
68			    stcb->sctp_ep,
69			    stcb,
70			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
71		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
72			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
73			    stcb->sctp_ep,
74			    stcb,
75			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
76		}
77	}
78}
79
80/* INIT handler */
/*
 * Handle an incoming INIT chunk.
 *
 * Validates the chunk (minimum length, non-zero initiate tag, minimum
 * a_rwnd, non-zero stream counts, and AUTH-related parameters) and, if
 * everything checks out, replies with an INIT-ACK carrying a state
 * cookie.  Any validation failure sends an ABORT instead and, when an
 * stcb exists, sets *abort_no_unlock so the caller skips unlocking the
 * now-aborted tcb.  When stcb is NULL, the inp read lock is taken here
 * and released at the "outnow" label.
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	if (stcb == NULL) {
		/* No association yet: hold the inp lock for the duration. */
		SCTP_INP_RLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			goto outnow;
		}
	}
	op_err = NULL;
	init = &cp->init;
	/* First are we accepting? */
	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init: Abort, so_qlimit:%d\n",
		    inp->sctp_socket->so_qlimit);
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* End of the INIT chunk within the mbuf chain. */
	init_limit = offset + ntohs(cp->ch.chunk_length);
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    init_limit)) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* send an INIT-ACK w/cookie */
	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
187
188/*
189 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
190 */
191
192int
193sctp_is_there_unsent_data(struct sctp_tcb *stcb)
194{
195	int unsent_data = 0;
196	struct sctp_stream_queue_pending *sp;
197	struct sctp_stream_out *strq;
198	struct sctp_association *asoc;
199
200	/*
201	 * This function returns the number of streams that have true unsent
202	 * data on them. Note that as it looks through it will clean up any
203	 * places that have old data that has been sent but left at top of
204	 * stream queue.
205	 */
206	asoc = &stcb->asoc;
207	SCTP_TCB_SEND_LOCK(stcb);
208	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
209		/* Check to see if some data queued */
210		TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
211	is_there_another:
212			/* sa_ignore FREED_MEMORY */
213			sp = TAILQ_FIRST(&strq->outqueue);
214			if (sp == NULL) {
215				continue;
216			}
217			if ((sp->msg_is_complete) &&
218			    (sp->length == 0) &&
219			    (sp->sender_all_done)) {
220				/*
221				 * We are doing differed cleanup. Last time
222				 * through when we took all the data the
223				 * sender_all_done was not set.
224				 */
225				if (sp->put_last_out == 0) {
226					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
227					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
228					    sp->sender_all_done,
229					    sp->length,
230					    sp->msg_is_complete,
231					    sp->put_last_out);
232				}
233				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
234				TAILQ_REMOVE(&strq->outqueue, sp, next);
235				sctp_free_remote_addr(sp->net);
236				if (sp->data) {
237					sctp_m_freem(sp->data);
238					sp->data = NULL;
239				}
240				sctp_free_a_strmoq(stcb, sp);
241				goto is_there_another;
242			} else {
243				unsent_data++;
244				continue;
245			}
246		}
247	}
248	SCTP_TCB_SEND_UNLOCK(stcb);
249	return (unsent_data);
250}
251
/*
 * Common INIT / INIT-ACK parameter processing.
 *
 * Records the peer's verification tag and receive window, trims our
 * outbound streams down to the number the peer will accept (dropping any
 * queued data on abandoned streams), initializes the inbound TSN/mapping
 * state from the peer's initial TSN, and (re)allocates the inbound
 * stream array sized to the peer's outbound stream count.
 *
 * Returns 0 on success or -1 if the inbound stream array cannot be
 * allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	if (TAILQ_FIRST(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp;
		struct sctp_tmit_chunk *chk, *chk_next;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge already-built chunks aimed at dropped streams. */
		if (!TAILQ_EMPTY(&asoc->send_queue)) {
			chk = TAILQ_FIRST(&asoc->send_queue);
			while (chk) {
				chk_next = TAILQ_NEXT(chk, sctp_next);
				if (chk->rec.data.stream_number >= newcnt) {
					TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
					asoc->send_queue_cnt--;
					if (chk->data != NULL) {
						sctp_free_bufspace(stcb, asoc, chk, 1);
						/* tell the app its data was never sent */
						sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
						    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED);
						if (chk->data) {
							sctp_m_freem(chk->data);
							chk->data = NULL;
						}
					}
					sctp_free_a_chunk(stcb, chk);
					/* sa_ignore FREED_MEMORY */
				}
				chk = chk_next;
			}
		}
		/* Then flush the per-stream pending queues of dropped streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				sp = TAILQ_FIRST(&outs->outqueue);
				while (sp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
					sp = TAILQ_FIRST(&outs->outqueue);
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	/*
	 * EY 05/13/08 - nr_sack: initialize nr_mapping array's base tsn
	 * like above
	 */
	asoc->nr_mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
	asoc->last_echo_tsn = asoc->asconf_seq_in;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones (restart case): drain each inqueue first. */
		struct sctp_queued_to_read *ctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			while (ctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count mirrors the peer's outbound count, capped. */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
408
409/*
410 * INIT-ACK message processing/consumption returns value < 0 on error
411 */
/*
 * Process an INIT-ACK chunk for an association we initiated.
 *
 * Verifies parameters (aborting on illegal ones), applies the peer's
 * INIT parameters via sctp_process_init(), loads the peer's addresses,
 * negotiates the HMAC id, stops the INIT timer, updates the RTO, and
 * queues the state cookie for echo.  Returns 0 on success or a negative
 * value on error; on abort paths *abort_no_unlock is set so the caller
 * skips unlocking the freed tcb.
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	/* May return an mbuf chain of unrecognized-parameter reports. */
	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	/* End of the INIT-ACK chunk within the mbuf chain. */
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *aparam;

		while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
			/* sa_ignore FREED_MEMORY */
			aparam = TAILQ_FIRST(&asoc->asconf_queue);
			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* A valid INIT-ACK resets the error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}
535
536static void
537sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
538    struct sctp_tcb *stcb, struct sctp_nets *net)
539{
540	struct sockaddr_storage store;
541	struct sockaddr_in *sin;
542	struct sockaddr_in6 *sin6;
543	struct sctp_nets *r_net;
544	struct timeval tv;
545	int req_prim = 0;
546
547	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
548		/* Invalid length */
549		return;
550	}
551	sin = (struct sockaddr_in *)&store;
552	sin6 = (struct sockaddr_in6 *)&store;
553
554	memset(&store, 0, sizeof(store));
555	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
556	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
557		sin->sin_family = cp->heartbeat.hb_info.addr_family;
558		sin->sin_len = cp->heartbeat.hb_info.addr_len;
559		sin->sin_port = stcb->rport;
560		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
561		    sizeof(sin->sin_addr));
562	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
563	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
564		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
565		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
566		sin6->sin6_port = stcb->rport;
567		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
568		    sizeof(sin6->sin6_addr));
569	} else {
570		return;
571	}
572	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
573	if (r_net == NULL) {
574		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
575		return;
576	}
577	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
578	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
579	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
580		/*
581		 * If the its a HB and it's random value is correct when can
582		 * confirm the destination.
583		 */
584		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
585		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
586			stcb->asoc.primary_destination = r_net;
587			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
588			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
589			r_net = TAILQ_FIRST(&stcb->asoc.nets);
590			if (r_net != stcb->asoc.primary_destination) {
591				/*
592				 * first one on the list is NOT the primary
593				 * sctp_cmpaddr() is much more efficent if
594				 * the primary is the first on the list,
595				 * make it so.
596				 */
597				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
598				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
599			}
600			req_prim = 1;
601		}
602		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
603		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
604	}
605	r_net->error_count = 0;
606	r_net->hb_responded = 1;
607	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
608	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
609	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
610		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
611		r_net->dest_state |= SCTP_ADDR_REACHABLE;
612		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
613		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
614		/* now was it the primary? if so restore */
615		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
616			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
617		}
618	}
619	/*
620	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
621	 * set the destination to active state and set the cwnd to one or
622	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
623	 * timer is running, for the destination, stop the timer because a
624	 * PF-heartbeat was received.
625	 */
626	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
627	    SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
628	    (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
629		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
630			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
631			    stcb, net,
632			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
633		}
634		net->dest_state &= ~SCTP_ADDR_PF;
635		net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
636		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
637		    net, net->cwnd);
638	}
639	/* Now lets do a RTO with this */
640	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
641	/* Mobility adaptation */
642	if (req_prim) {
643		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
644		    SCTP_MOBILITY_BASE) ||
645		    sctp_is_mobility_feature_on(stcb->sctp_ep,
646		    SCTP_MOBILITY_FASTHANDOFF)) &&
647		    sctp_is_mobility_feature_on(stcb->sctp_ep,
648		    SCTP_MOBILITY_PRIM_DELETED)) {
649
650			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
651			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
652			    SCTP_MOBILITY_FASTHANDOFF)) {
653				sctp_assoc_immediate_retrans(stcb,
654				    stcb->asoc.primary_destination);
655			}
656			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
657			    SCTP_MOBILITY_BASE)) {
658				sctp_move_chunks_from_deleted_prim(stcb,
659				    stcb->asoc.primary_destination);
660			}
661			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
662			    stcb->asoc.deleted_primary);
663		}
664	}
665}
666
/*
 * Handle an ABORT carrying the NAT "colliding state" error cause.
 * In COOKIE-WAIT or COOKIE-ECHOED state we recover by picking a fresh
 * verification tag, re-hashing the assoc under it, and re-sending INIT
 * rather than tearing the association down.
 */
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	struct sctpasochead *head;

	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * treat like a case where the cookie expired i.e.: - dump
		 * current cookie. - generate a new vtag. - resend init.
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		/* fall back from COOKIE-ECHOED to COOKIE-WAIT */
		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	/* any other state: proceed with normal abort handling */
	return (0);
}
712
713static int
714sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
715    struct sctp_nets *net)
716{
717	/*
718	 * return 0 means we want you to proceed with the abort non-zero
719	 * means no abort processing
720	 */
721	if (stcb->asoc.peer_supports_auth == 0) {
722		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
723		return (0);
724	}
725	sctp_asconf_send_nat_state_update(stcb, net);
726	return (1);
727}
728
729
/*
 * Handle an incoming ABORT chunk.
 * First checks for the two special NAT error causes, which may be
 * recoverable and then skip the teardown; otherwise stops the receive
 * timer, notifies the user, and frees the association.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(cp->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_abort_chunk *cpnext;
		struct sctp_missing_nat_state *natc;
		uint16_t cause;

		/* first error cause immediately follows the chunk header */
		cpnext = cp;
		cpnext++;
		natc = (struct sctp_missing_nat_state *)cpnext;
		cause = ntohs(natc->cause);
		if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    cp->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				/* recovered: suppress the abort */
				return;
			}
		} else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    cp->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				/* recovered: suppress the abort */
				return;
			}
		}
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
#if defined(SCTP_PANIC_ON_ABORT)
	printf("stcb:%p state:%d rport:%d net:%p\n",
	    stcb, stcb->asoc.state, stcb->rport, net);
	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		panic("Received an ABORT");
	} else {
		printf("No panic its in state %x closed\n", stcb->asoc.state);
	}
#endif
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* lock ordering: socket lock must be taken with tcb unlocked */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/*
	 * NOTE(review): SCTP_LOC_6 is also used by the timer-stop above;
	 * a distinct location code here would aid debugging — confirm.
	 */
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
810
/*
 * Handle an incoming SHUTDOWN chunk.
 * Updates acked data from the chunk's cumulative TSN, terminates any
 * partial-delivery in progress, moves the association toward
 * SHUTDOWN-RECEIVED, and — once no data remains queued — replies with
 * SHUTDOWN-ACK and enters SHUTDOWN-ACK-SENT.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* SHUTDOWN makes no sense before the assoc is up: ignore */
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		/* apply the cumulative TSN ack carried in the SHUTDOWN */
		sctp_update_acked(stcb, cp, net, abort_flag);
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* lock ordering: socket lock must be taken with tcb unlocked */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
		/* start SHUTDOWN timer */
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}
915
916static void
917sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
918    struct sctp_tcb *stcb, struct sctp_nets *net)
919{
920	struct sctp_association *asoc;
921
922#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
923	struct socket *so;
924
925	so = SCTP_INP_SO(stcb->sctp_ep);
926#endif
927	SCTPDBG(SCTP_DEBUG_INPUT2,
928	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
929	if (stcb == NULL)
930		return;
931
932	asoc = &stcb->asoc;
933	/* process according to association state */
934	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
935	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
936		/* unexpected SHUTDOWN-ACK... so ignore... */
937		SCTP_TCB_UNLOCK(stcb);
938		return;
939	}
940	if (asoc->control_pdapi) {
941		/*
942		 * With a normal shutdown we assume the end of last record.
943		 */
944		SCTP_INP_READ_LOCK(stcb->sctp_ep);
945		asoc->control_pdapi->end_added = 1;
946		asoc->control_pdapi->pdapi_aborted = 1;
947		asoc->control_pdapi = NULL;
948		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
949#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
950		atomic_add_int(&stcb->asoc.refcnt, 1);
951		SCTP_TCB_UNLOCK(stcb);
952		SCTP_SOCKET_LOCK(so, 1);
953		SCTP_TCB_LOCK(stcb);
954		atomic_subtract_int(&stcb->asoc.refcnt, 1);
955		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
956			/* assoc was freed while we were unlocked */
957			SCTP_SOCKET_UNLOCK(so, 1);
958			return;
959		}
960#endif
961		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
962#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
963		SCTP_SOCKET_UNLOCK(so, 1);
964#endif
965	}
966	/* are the queues empty? */
967	if (!TAILQ_EMPTY(&asoc->send_queue) ||
968	    !TAILQ_EMPTY(&asoc->sent_queue) ||
969	    !TAILQ_EMPTY(&asoc->out_wheel)) {
970		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
971	}
972	/* stop the timer */
973	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
974	/* send SHUTDOWN-COMPLETE */
975	sctp_send_shutdown_complete(stcb, net);
976	/* notify upper layer protocol */
977	if (stcb->sctp_socket) {
978		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
979		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
980		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
981			/* Set the connected flag to disconnected */
982			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
983		}
984	}
985	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
986	/* free the TCB but first save off the ep */
987#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
988	atomic_add_int(&stcb->asoc.refcnt, 1);
989	SCTP_TCB_UNLOCK(stcb);
990	SCTP_SOCKET_LOCK(so, 1);
991	SCTP_TCB_LOCK(stcb);
992	atomic_subtract_int(&stcb->asoc.refcnt, 1);
993#endif
994	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
995	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
996#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
997	SCTP_SOCKET_UNLOCK(so, 1);
998#endif
999}
1000
1001/*
1002 * Skip past the param header and then we will find the chunk that caused the
1003 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
1004 * our peer must be broken.
1005 */
1006static void
1007sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1008    struct sctp_nets *net)
1009{
1010	struct sctp_chunkhdr *chk;
1011
1012	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1013	switch (chk->chunk_type) {
1014	case SCTP_ASCONF_ACK:
1015	case SCTP_ASCONF:
1016		sctp_asconf_cleanup(stcb, net);
1017		break;
1018	case SCTP_FORWARD_CUM_TSN:
1019		stcb->asoc.peer_supports_prsctp = 0;
1020		break;
1021	default:
1022		SCTPDBG(SCTP_DEBUG_INPUT2,
1023		    "Peer does not support chunk type %d(%x)??\n",
1024		    chk->chunk_type, (uint32_t) chk->chunk_type);
1025		break;
1026	}
1027}
1028
1029/*
1030 * Skip past the param header and then we will find the param that caused the
1031 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1032 * these will turn of specific features.
1033 */
1034static void
1035sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1036{
1037	struct sctp_paramhdr *pbad;
1038
1039	pbad = phdr + 1;
1040	switch (ntohs(pbad->param_type)) {
1041		/* pr-sctp draft */
1042	case SCTP_PRSCTP_SUPPORTED:
1043		stcb->asoc.peer_supports_prsctp = 0;
1044		break;
1045	case SCTP_SUPPORTED_CHUNK_EXT:
1046		break;
1047		/* draft-ietf-tsvwg-addip-sctp */
1048	case SCTP_HAS_NAT_SUPPORT:
1049		stcb->asoc.peer_supports_nat = 0;
1050		break;
1051	case SCTP_ECN_NONCE_SUPPORTED:
1052		stcb->asoc.peer_supports_ecn_nonce = 0;
1053		stcb->asoc.ecn_nonce_allowed = 0;
1054		stcb->asoc.ecn_allowed = 0;
1055		break;
1056	case SCTP_ADD_IP_ADDRESS:
1057	case SCTP_DEL_IP_ADDRESS:
1058	case SCTP_SET_PRIM_ADDR:
1059		stcb->asoc.peer_supports_asconf = 0;
1060		break;
1061	case SCTP_SUCCESS_REPORT:
1062	case SCTP_ERROR_CAUSE_IND:
1063		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1064		SCTPDBG(SCTP_DEBUG_INPUT2,
1065		    "Turning off ASCONF to this strange peer\n");
1066		stcb->asoc.peer_supports_asconf = 0;
1067		break;
1068	default:
1069		SCTPDBG(SCTP_DEBUG_INPUT2,
1070		    "Peer does not support param type %d(%x)??\n",
1071		    pbad->param_type, (uint32_t) pbad->param_type);
1072		break;
1073	}
1074}
1075
1076static int
1077sctp_handle_error(struct sctp_chunkhdr *ch,
1078    struct sctp_tcb *stcb, struct sctp_nets *net)
1079{
1080	int chklen;
1081	struct sctp_paramhdr *phdr;
1082	uint16_t error_type;
1083	uint16_t error_len;
1084	struct sctp_association *asoc;
1085	int adjust;
1086
1087#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1088	struct socket *so;
1089
1090#endif
1091
1092	/* parse through all of the errors and process */
1093	asoc = &stcb->asoc;
1094	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1095	    sizeof(struct sctp_chunkhdr));
1096	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
1097	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1098		/* Process an Error Cause */
1099		error_type = ntohs(phdr->param_type);
1100		error_len = ntohs(phdr->param_length);
1101		if ((error_len > chklen) || (error_len == 0)) {
1102			/* invalid param length for this param */
1103			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1104			    chklen, error_len);
1105			return (0);
1106		}
1107		switch (error_type) {
1108		case SCTP_CAUSE_INVALID_STREAM:
1109		case SCTP_CAUSE_MISSING_PARAM:
1110		case SCTP_CAUSE_INVALID_PARAM:
1111		case SCTP_CAUSE_NO_USER_DATA:
1112			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1113			    error_type);
1114			break;
1115		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1116			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1117			    ch->chunk_flags);
1118			if (sctp_handle_nat_colliding_state(stcb)) {
1119				return (0);
1120			}
1121			break;
1122		case SCTP_CAUSE_NAT_MISSING_STATE:
1123			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1124			    ch->chunk_flags);
1125			if (sctp_handle_nat_missing_state(stcb, net)) {
1126				return (0);
1127			}
1128			break;
1129		case SCTP_CAUSE_STALE_COOKIE:
1130			/*
1131			 * We only act if we have echoed a cookie and are
1132			 * waiting.
1133			 */
1134			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1135				int *p;
1136
1137				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1138				/* Save the time doubled */
1139				asoc->cookie_preserve_req = ntohl(*p) << 1;
1140				asoc->stale_cookie_count++;
1141				if (asoc->stale_cookie_count >
1142				    asoc->max_init_times) {
1143					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
1144					/* now free the asoc */
1145#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1146					so = SCTP_INP_SO(stcb->sctp_ep);
1147					atomic_add_int(&stcb->asoc.refcnt, 1);
1148					SCTP_TCB_UNLOCK(stcb);
1149					SCTP_SOCKET_LOCK(so, 1);
1150					SCTP_TCB_LOCK(stcb);
1151					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1152#endif
1153					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1154					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1155#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1156					SCTP_SOCKET_UNLOCK(so, 1);
1157#endif
1158					return (-1);
1159				}
1160				/* blast back to INIT state */
1161				sctp_toss_old_cookies(stcb, &stcb->asoc);
1162				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1163				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1164				sctp_stop_all_cookie_timers(stcb);
1165				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1166			}
1167			break;
1168		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1169			/*
1170			 * Nothing we can do here, we don't do hostname
1171			 * addresses so if the peer does not like my IPv6
1172			 * (or IPv4 for that matter) it does not matter. If
1173			 * they don't support that type of address, they can
1174			 * NOT possibly get that packet type... i.e. with no
1175			 * IPv6 you can't recieve a IPv6 packet. so we can
1176			 * safely ignore this one. If we ever added support
1177			 * for HOSTNAME Addresses, then we would need to do
1178			 * something here.
1179			 */
1180			break;
1181		case SCTP_CAUSE_UNRECOG_CHUNK:
1182			sctp_process_unrecog_chunk(stcb, phdr, net);
1183			break;
1184		case SCTP_CAUSE_UNRECOG_PARAM:
1185			sctp_process_unrecog_param(stcb, phdr);
1186			break;
1187		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1188			/*
1189			 * We ignore this since the timer will drive out a
1190			 * new cookie anyway and there timer will drive us
1191			 * to send a SHUTDOWN_COMPLETE. We can't send one
1192			 * here since we don't have their tag.
1193			 */
1194			break;
1195		case SCTP_CAUSE_DELETING_LAST_ADDR:
1196		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1197		case SCTP_CAUSE_DELETING_SRC_ADDR:
1198			/*
1199			 * We should NOT get these here, but in a
1200			 * ASCONF-ACK.
1201			 */
1202			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1203			    error_type);
1204			break;
1205		case SCTP_CAUSE_OUT_OF_RESC:
1206			/*
1207			 * And what, pray tell do we do with the fact that
1208			 * the peer is out of resources? Not really sure we
1209			 * could do anything but abort. I suspect this
1210			 * should have came WITH an abort instead of in a
1211			 * OP-ERROR.
1212			 */
1213			break;
1214		default:
1215			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1216			    error_type);
1217			break;
1218		}
1219		adjust = SCTP_SIZE32(error_len);
1220		chklen -= adjust;
1221		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1222	}
1223	return (0);
1224}
1225
1226static int
1227sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1228    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1229    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1230{
1231	struct sctp_init_ack *init_ack;
1232	struct mbuf *op_err;
1233
1234	SCTPDBG(SCTP_DEBUG_INPUT2,
1235	    "sctp_handle_init_ack: handling INIT-ACK\n");
1236
1237	if (stcb == NULL) {
1238		SCTPDBG(SCTP_DEBUG_INPUT2,
1239		    "sctp_handle_init_ack: TCB is null\n");
1240		return (-1);
1241	}
1242	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1243		/* Invalid length */
1244		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1245		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1246		    op_err, 0, net->port);
1247		*abort_no_unlock = 1;
1248		return (-1);
1249	}
1250	init_ack = &cp->init;
1251	/* validate parameters */
1252	if (init_ack->initiate_tag == 0) {
1253		/* protocol error... send an abort */
1254		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1255		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1256		    op_err, 0, net->port);
1257		*abort_no_unlock = 1;
1258		return (-1);
1259	}
1260	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1261		/* protocol error... send an abort */
1262		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1263		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1264		    op_err, 0, net->port);
1265		*abort_no_unlock = 1;
1266		return (-1);
1267	}
1268	if (init_ack->num_inbound_streams == 0) {
1269		/* protocol error... send an abort */
1270		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1271		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1272		    op_err, 0, net->port);
1273		*abort_no_unlock = 1;
1274		return (-1);
1275	}
1276	if (init_ack->num_outbound_streams == 0) {
1277		/* protocol error... send an abort */
1278		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1279		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1280		    op_err, 0, net->port);
1281		*abort_no_unlock = 1;
1282		return (-1);
1283	}
1284	/* process according to association state... */
1285	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1286	case SCTP_STATE_COOKIE_WAIT:
1287		/* this is the expected state for this chunk */
1288		/* process the INIT-ACK parameters */
1289		if (stcb->asoc.primary_destination->dest_state &
1290		    SCTP_ADDR_UNCONFIRMED) {
1291			/*
1292			 * The primary is where we sent the INIT, we can
1293			 * always consider it confirmed when the INIT-ACK is
1294			 * returned. Do this before we load addresses
1295			 * though.
1296			 */
1297			stcb->asoc.primary_destination->dest_state &=
1298			    ~SCTP_ADDR_UNCONFIRMED;
1299			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1300			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1301		}
1302		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1303		    net, abort_no_unlock, vrf_id) < 0) {
1304			/* error in parsing parameters */
1305			return (-1);
1306		}
1307		/* update our state */
1308		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1309		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1310
1311		/* reset the RTO calc */
1312		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1313			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1314			    stcb->asoc.overall_error_count,
1315			    0,
1316			    SCTP_FROM_SCTP_INPUT,
1317			    __LINE__);
1318		}
1319		stcb->asoc.overall_error_count = 0;
1320		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1321		/*
1322		 * collapse the init timer back in case of a exponential
1323		 * backoff
1324		 */
1325		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1326		    stcb, net);
1327		/*
1328		 * the send at the end of the inbound data processing will
1329		 * cause the cookie to be sent
1330		 */
1331		break;
1332	case SCTP_STATE_SHUTDOWN_SENT:
1333		/* incorrect state... discard */
1334		break;
1335	case SCTP_STATE_COOKIE_ECHOED:
1336		/* incorrect state... discard */
1337		break;
1338	case SCTP_STATE_OPEN:
1339		/* incorrect state... discard */
1340		break;
1341	case SCTP_STATE_EMPTY:
1342	case SCTP_STATE_INUSE:
1343	default:
1344		/* incorrect state... discard */
1345		return (-1);
1346		break;
1347	}
1348	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1349	return (0);
1350}
1351
1352static struct sctp_tcb *
1353sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1354    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1355    struct sctp_inpcb *inp, struct sctp_nets **netp,
1356    struct sockaddr *init_src, int *notification,
1357    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1358    uint32_t vrf_id, uint16_t port);
1359
1360
1361/*
1362 * handle a state cookie for an existing association m: input packet mbuf
1363 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1364 * "split" mbuf and the cookie signature does not exist offset: offset into
1365 * mbuf to the cookie-echo chunk
1366 */
1367static struct sctp_tcb *
1368sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1369    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1370    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1371    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1372    uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
1373{
1374	struct sctp_association *asoc;
1375	struct sctp_init_chunk *init_cp, init_buf;
1376	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1377	struct sctp_nets *net;
1378	struct mbuf *op_err;
1379	struct sctp_paramhdr *ph;
1380	int chk_length;
1381	int init_offset, initack_offset, i;
1382	int retval;
1383	int spec_flag = 0;
1384	uint32_t how_indx;
1385
1386	net = *netp;
1387	/* I know that the TCB is non-NULL from the caller */
1388	asoc = &stcb->asoc;
1389	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1390		if (asoc->cookie_how[how_indx] == 0)
1391			break;
1392	}
1393	if (how_indx < sizeof(asoc->cookie_how)) {
1394		asoc->cookie_how[how_indx] = 1;
1395	}
1396	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1397		/* SHUTDOWN came in after sending INIT-ACK */
1398		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1399		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1400		    0, M_DONTWAIT, 1, MT_DATA);
1401		if (op_err == NULL) {
1402			/* FOOBAR */
1403			return (NULL);
1404		}
1405		/* Set the len */
1406		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1407		ph = mtod(op_err, struct sctp_paramhdr *);
1408		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1409		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1410		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1411		    vrf_id, net->port);
1412		if (how_indx < sizeof(asoc->cookie_how))
1413			asoc->cookie_how[how_indx] = 2;
1414		return (NULL);
1415	}
1416	/*
1417	 * find and validate the INIT chunk in the cookie (peer's info) the
1418	 * INIT should start after the cookie-echo header struct (chunk
1419	 * header, state cookie header struct)
1420	 */
1421	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1422
1423	init_cp = (struct sctp_init_chunk *)
1424	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1425	    (uint8_t *) & init_buf);
1426	if (init_cp == NULL) {
1427		/* could not pull a INIT chunk in cookie */
1428		return (NULL);
1429	}
1430	chk_length = ntohs(init_cp->ch.chunk_length);
1431	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1432		return (NULL);
1433	}
1434	/*
1435	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1436	 * INIT-ACK follows the INIT chunk
1437	 */
1438	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1439	initack_cp = (struct sctp_init_ack_chunk *)
1440	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1441	    (uint8_t *) & initack_buf);
1442	if (initack_cp == NULL) {
1443		/* could not pull INIT-ACK chunk in cookie */
1444		return (NULL);
1445	}
1446	chk_length = ntohs(initack_cp->ch.chunk_length);
1447	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1448		return (NULL);
1449	}
1450	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1451	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1452		/*
1453		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1454		 * to get into the OPEN state
1455		 */
1456		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1457			/*-
1458			 * Opps, this means that we somehow generated two vtag's
1459			 * the same. I.e. we did:
1460			 *  Us               Peer
1461			 *   <---INIT(tag=a)------
1462			 *   ----INIT-ACK(tag=t)-->
1463			 *   ----INIT(tag=t)------> *1
1464			 *   <---INIT-ACK(tag=a)---
1465                         *   <----CE(tag=t)------------- *2
1466			 *
1467			 * At point *1 we should be generating a different
1468			 * tag t'. Which means we would throw away the CE and send
1469			 * ours instead. Basically this is case C (throw away side).
1470			 */
1471			if (how_indx < sizeof(asoc->cookie_how))
1472				asoc->cookie_how[how_indx] = 17;
1473			return (NULL);
1474
1475		}
1476		switch SCTP_GET_STATE
1477			(asoc) {
1478		case SCTP_STATE_COOKIE_WAIT:
1479		case SCTP_STATE_COOKIE_ECHOED:
1480			/*
1481			 * INIT was sent but got a COOKIE_ECHO with the
1482			 * correct tags... just accept it...but we must
1483			 * process the init so that we can make sure we have
1484			 * the right seq no's.
1485			 */
1486			/* First we must process the INIT !! */
1487			retval = sctp_process_init(init_cp, stcb, net);
1488			if (retval < 0) {
1489				if (how_indx < sizeof(asoc->cookie_how))
1490					asoc->cookie_how[how_indx] = 3;
1491				return (NULL);
1492			}
1493			/* we have already processed the INIT so no problem */
1494			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1495			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1496			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1497			/* update current state */
1498			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1499				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1500			else
1501				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1502
1503			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1504			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1505				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1506				    stcb->sctp_ep, stcb, asoc->primary_destination);
1507			}
1508			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1509			sctp_stop_all_cookie_timers(stcb);
1510			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1511			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1512			    (inp->sctp_socket->so_qlimit == 0)
1513			    ) {
1514#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1515				struct socket *so;
1516
1517#endif
1518				/*
1519				 * Here is where collision would go if we
1520				 * did a connect() and instead got a
1521				 * init/init-ack/cookie done before the
1522				 * init-ack came back..
1523				 */
1524				stcb->sctp_ep->sctp_flags |=
1525				    SCTP_PCB_FLAGS_CONNECTED;
1526#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1527				so = SCTP_INP_SO(stcb->sctp_ep);
1528				atomic_add_int(&stcb->asoc.refcnt, 1);
1529				SCTP_TCB_UNLOCK(stcb);
1530				SCTP_SOCKET_LOCK(so, 1);
1531				SCTP_TCB_LOCK(stcb);
1532				atomic_add_int(&stcb->asoc.refcnt, -1);
1533				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1534					SCTP_SOCKET_UNLOCK(so, 1);
1535					return (NULL);
1536				}
1537#endif
1538				soisconnected(stcb->sctp_socket);
1539#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1540				SCTP_SOCKET_UNLOCK(so, 1);
1541#endif
1542			}
1543			/* notify upper layer */
1544			*notification = SCTP_NOTIFY_ASSOC_UP;
1545			/*
1546			 * since we did not send a HB make sure we don't
1547			 * double things
1548			 */
1549			net->hb_responded = 1;
1550			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1551			    &cookie->time_entered, sctp_align_unsafe_makecopy);
1552
1553			if (stcb->asoc.sctp_autoclose_ticks &&
1554			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1555				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1556				    inp, stcb, NULL);
1557			}
1558			break;
1559		default:
1560			/*
1561			 * we're in the OPEN state (or beyond), so peer must
1562			 * have simply lost the COOKIE-ACK
1563			 */
1564			break;
1565			}	/* end switch */
1566		sctp_stop_all_cookie_timers(stcb);
1567		/*
1568		 * We ignore the return code here.. not sure if we should
1569		 * somehow abort.. but we do have an existing asoc. This
1570		 * really should not fail.
1571		 */
1572		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1573		    init_offset + sizeof(struct sctp_init_chunk),
1574		    initack_offset, sh, init_src)) {
1575			if (how_indx < sizeof(asoc->cookie_how))
1576				asoc->cookie_how[how_indx] = 4;
1577			return (NULL);
1578		}
1579		/* respond with a COOKIE-ACK */
1580		sctp_toss_old_cookies(stcb, asoc);
1581		sctp_send_cookie_ack(stcb);
1582		if (how_indx < sizeof(asoc->cookie_how))
1583			asoc->cookie_how[how_indx] = 5;
1584		return (stcb);
1585	}
1586	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1587	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1588	    cookie->tie_tag_my_vtag == 0 &&
1589	    cookie->tie_tag_peer_vtag == 0) {
1590		/*
1591		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1592		 */
1593		if (how_indx < sizeof(asoc->cookie_how))
1594			asoc->cookie_how[how_indx] = 6;
1595		return (NULL);
1596	}
1597	/*
1598	 * If nat support, and the below and stcb is established, send back
1599	 * a ABORT(colliding state) if we are established.
1600	 */
1601	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1602	    (asoc->peer_supports_nat) &&
1603	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1604	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1605	    (asoc->peer_vtag == 0)))) {
1606		/*
1607		 * Special case - Peer's support nat. We may have two init's
1608		 * that we gave out the same tag on since one was not
1609		 * established.. i.e. we get INIT from host-1 behind the nat
1610		 * and we respond tag-a, we get a INIT from host-2 behind
1611		 * the nat and we get tag-a again. Then we bring up host-1
1612		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1613		 * Now we have colliding state. We must send an abort here
1614		 * with colliding state indication.
1615		 */
1616		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1617		    0, M_DONTWAIT, 1, MT_DATA);
1618		if (op_err == NULL) {
1619			/* FOOBAR */
1620			return (NULL);
1621		}
1622		/* pre-reserve some space */
1623#ifdef INET6
1624		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1625#else
1626		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1627#endif
1628		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1629		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1630		/* Set the len */
1631		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1632		ph = mtod(op_err, struct sctp_paramhdr *);
1633		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
1634		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1635		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
1636		return (NULL);
1637	}
1638	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1639	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1640	    (asoc->peer_vtag == 0))) {
1641		/*
1642		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1643		 * should be ok, re-accept peer info
1644		 */
1645		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1646			/*
1647			 * Extension of case C. If we hit this, then the
1648			 * random number generator returned the same vtag
1649			 * when we first sent our INIT-ACK and when we later
1650			 * sent our INIT. The side with the seq numbers that
1651			 * are different will be the one that normnally
1652			 * would have hit case C. This in effect "extends"
1653			 * our vtags in this collision case to be 64 bits.
1654			 * The same collision could occur aka you get both
1655			 * vtag and seq number the same twice in a row.. but
1656			 * is much less likely. If it did happen then we
1657			 * would proceed through and bring up the assoc.. we
1658			 * may end up with the wrong stream setup however..
1659			 * which would be bad.. but there is no way to
1660			 * tell.. until we send on a stream that does not
1661			 * exist :-)
1662			 */
1663			if (how_indx < sizeof(asoc->cookie_how))
1664				asoc->cookie_how[how_indx] = 7;
1665
1666			return (NULL);
1667		}
1668		if (how_indx < sizeof(asoc->cookie_how))
1669			asoc->cookie_how[how_indx] = 8;
1670		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1671		sctp_stop_all_cookie_timers(stcb);
1672		/*
1673		 * since we did not send a HB make sure we don't double
1674		 * things
1675		 */
1676		net->hb_responded = 1;
1677		if (stcb->asoc.sctp_autoclose_ticks &&
1678		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1679			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1680			    NULL);
1681		}
1682		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1683		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1684
1685		/* Note last_cwr_tsn? where is this used? */
1686		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1687		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1688			/*
1689			 * Ok the peer probably discarded our data (if we
1690			 * echoed a cookie+data). So anything on the
1691			 * sent_queue should be marked for retransmit, we
1692			 * may not get something to kick us so it COULD
1693			 * still take a timeout to move these.. but it can't
1694			 * hurt to mark them.
1695			 */
1696			struct sctp_tmit_chunk *chk;
1697
1698			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1699				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1700					chk->sent = SCTP_DATAGRAM_RESEND;
1701					sctp_flight_size_decrease(chk);
1702					sctp_total_flight_decrease(stcb, chk);
1703					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1704					spec_flag++;
1705				}
1706			}
1707
1708		}
1709		/* process the INIT info (peer's info) */
1710		retval = sctp_process_init(init_cp, stcb, net);
1711		if (retval < 0) {
1712			if (how_indx < sizeof(asoc->cookie_how))
1713				asoc->cookie_how[how_indx] = 9;
1714			return (NULL);
1715		}
1716		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1717		    init_offset + sizeof(struct sctp_init_chunk),
1718		    initack_offset, sh, init_src)) {
1719			if (how_indx < sizeof(asoc->cookie_how))
1720				asoc->cookie_how[how_indx] = 10;
1721			return (NULL);
1722		}
1723		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1724		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1725			*notification = SCTP_NOTIFY_ASSOC_UP;
1726
1727			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1728			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1729			    (inp->sctp_socket->so_qlimit == 0)) {
1730#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1731				struct socket *so;
1732
1733#endif
1734				stcb->sctp_ep->sctp_flags |=
1735				    SCTP_PCB_FLAGS_CONNECTED;
1736#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1737				so = SCTP_INP_SO(stcb->sctp_ep);
1738				atomic_add_int(&stcb->asoc.refcnt, 1);
1739				SCTP_TCB_UNLOCK(stcb);
1740				SCTP_SOCKET_LOCK(so, 1);
1741				SCTP_TCB_LOCK(stcb);
1742				atomic_add_int(&stcb->asoc.refcnt, -1);
1743				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1744					SCTP_SOCKET_UNLOCK(so, 1);
1745					return (NULL);
1746				}
1747#endif
1748				soisconnected(stcb->sctp_socket);
1749#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1750				SCTP_SOCKET_UNLOCK(so, 1);
1751#endif
1752			}
1753			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1754				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1755			else
1756				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1757			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1758		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1759			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1760		} else {
1761			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1762		}
1763		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1764		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1765			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1766			    stcb->sctp_ep, stcb, asoc->primary_destination);
1767		}
1768		sctp_stop_all_cookie_timers(stcb);
1769		sctp_toss_old_cookies(stcb, asoc);
1770		sctp_send_cookie_ack(stcb);
1771		if (spec_flag) {
1772			/*
1773			 * only if we have retrans set do we do this. What
1774			 * this call does is get only the COOKIE-ACK out and
1775			 * then when we return the normal call to
1776			 * sctp_chunk_output will get the retrans out behind
1777			 * this.
1778			 */
1779			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1780		}
1781		if (how_indx < sizeof(asoc->cookie_how))
1782			asoc->cookie_how[how_indx] = 11;
1783
1784		return (stcb);
1785	}
1786	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1787	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1788	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1789	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1790	    cookie->tie_tag_peer_vtag != 0) {
1791		struct sctpasochead *head;
1792
1793		if (asoc->peer_supports_nat) {
1794			/*
1795			 * This is a gross gross hack. just call the
1796			 * cookie_new code since we are allowing a duplicate
1797			 * association. I hope this works...
1798			 */
1799			return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
1800			    inp, netp, init_src, notification,
1801			    auth_skipped, auth_offset, auth_len,
1802			    vrf_id, port));
1803		}
1804		/*
1805		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1806		 */
1807		/* temp code */
1808		if (how_indx < sizeof(asoc->cookie_how))
1809			asoc->cookie_how[how_indx] = 12;
1810		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1811		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1812
1813		*sac_assoc_id = sctp_get_associd(stcb);
1814		/* notify upper layer */
1815		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1816		atomic_add_int(&stcb->asoc.refcnt, 1);
1817		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1818		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1819		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1820			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1821		}
1822		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1823			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1824		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1825			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1826		}
1827		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1828			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1829			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1830			    stcb->sctp_ep, stcb, asoc->primary_destination);
1831
1832		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1833			/* move to OPEN state, if not in SHUTDOWN_SENT */
1834			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1835		}
1836		asoc->pre_open_streams =
1837		    ntohs(initack_cp->init.num_outbound_streams);
1838		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1839		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1840		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1841
1842		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1843		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1844
1845		asoc->str_reset_seq_in = asoc->init_seq_number;
1846
1847		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1848		if (asoc->mapping_array) {
1849			memset(asoc->mapping_array, 0,
1850			    asoc->mapping_array_size);
1851		}
1852		/* EY 05/13/08 - nr_sack version of the above if statement */
1853		if (asoc->nr_mapping_array && SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)
1854		    && asoc->peer_supports_nr_sack) {
1855			memset(asoc->nr_mapping_array, 0,
1856			    asoc->nr_mapping_array_size);
1857		}
1858		SCTP_TCB_UNLOCK(stcb);
1859		SCTP_INP_INFO_WLOCK();
1860		SCTP_INP_WLOCK(stcb->sctp_ep);
1861		SCTP_TCB_LOCK(stcb);
1862		atomic_add_int(&stcb->asoc.refcnt, -1);
1863		/* send up all the data */
1864		SCTP_TCB_SEND_LOCK(stcb);
1865
1866		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1867		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1868			stcb->asoc.strmout[i].stream_no = i;
1869			stcb->asoc.strmout[i].next_sequence_sent = 0;
1870			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1871		}
1872		/* process the INIT-ACK info (my info) */
1873		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1874		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1875
1876		/* pull from vtag hash */
1877		LIST_REMOVE(stcb, sctp_asocs);
1878		/* re-insert to new vtag position */
1879		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1880		    SCTP_BASE_INFO(hashasocmark))];
1881		/*
1882		 * put it in the bucket in the vtag hash of assoc's for the
1883		 * system
1884		 */
1885		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1886
1887		/* process the INIT info (peer's info) */
1888		SCTP_TCB_SEND_UNLOCK(stcb);
1889		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1890		SCTP_INP_INFO_WUNLOCK();
1891
1892		retval = sctp_process_init(init_cp, stcb, net);
1893		if (retval < 0) {
1894			if (how_indx < sizeof(asoc->cookie_how))
1895				asoc->cookie_how[how_indx] = 13;
1896
1897			return (NULL);
1898		}
1899		/*
1900		 * since we did not send a HB make sure we don't double
1901		 * things
1902		 */
1903		net->hb_responded = 1;
1904
1905		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1906		    init_offset + sizeof(struct sctp_init_chunk),
1907		    initack_offset, sh, init_src)) {
1908			if (how_indx < sizeof(asoc->cookie_how))
1909				asoc->cookie_how[how_indx] = 14;
1910
1911			return (NULL);
1912		}
1913		/* respond with a COOKIE-ACK */
1914		sctp_stop_all_cookie_timers(stcb);
1915		sctp_toss_old_cookies(stcb, asoc);
1916		sctp_send_cookie_ack(stcb);
1917		if (how_indx < sizeof(asoc->cookie_how))
1918			asoc->cookie_how[how_indx] = 15;
1919
1920		return (stcb);
1921	}
1922	if (how_indx < sizeof(asoc->cookie_how))
1923		asoc->cookie_how[how_indx] = 16;
1924	/* all other cases... */
1925	return (NULL);
1926}
1927
1928
1929/*
1930 * handle a state cookie for a new association m: input packet mbuf chain--
1931 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1932 * and the cookie signature does not exist offset: offset into mbuf to the
1933 * cookie-echo chunk length: length of the cookie chunk to: where the init
1934 * was from returns a new TCB
1935 */
1936struct sctp_tcb *
1937sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1938    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1939    struct sctp_inpcb *inp, struct sctp_nets **netp,
1940    struct sockaddr *init_src, int *notification,
1941    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1942    uint32_t vrf_id, uint16_t port)
1943{
1944	struct sctp_tcb *stcb;
1945	struct sctp_init_chunk *init_cp, init_buf;
1946	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1947	struct sockaddr_storage sa_store;
1948	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1949	struct sockaddr_in *sin;
1950	struct sockaddr_in6 *sin6;
1951	struct sctp_association *asoc;
1952	int chk_length;
1953	int init_offset, initack_offset, initack_limit;
1954	int retval;
1955	int error = 0;
1956	uint32_t old_tag;
1957	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1958
1959#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1960	struct socket *so;
1961
1962	so = SCTP_INP_SO(inp);
1963#endif
1964
1965	/*
1966	 * find and validate the INIT chunk in the cookie (peer's info) the
1967	 * INIT should start after the cookie-echo header struct (chunk
1968	 * header, state cookie header struct)
1969	 */
1970	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1971	init_cp = (struct sctp_init_chunk *)
1972	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1973	    (uint8_t *) & init_buf);
1974	if (init_cp == NULL) {
1975		/* could not pull a INIT chunk in cookie */
1976		SCTPDBG(SCTP_DEBUG_INPUT1,
1977		    "process_cookie_new: could not pull INIT chunk hdr\n");
1978		return (NULL);
1979	}
1980	chk_length = ntohs(init_cp->ch.chunk_length);
1981	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1982		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1983		return (NULL);
1984	}
1985	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1986	/*
1987	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1988	 * INIT-ACK follows the INIT chunk
1989	 */
1990	initack_cp = (struct sctp_init_ack_chunk *)
1991	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1992	    (uint8_t *) & initack_buf);
1993	if (initack_cp == NULL) {
1994		/* could not pull INIT-ACK chunk in cookie */
1995		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1996		return (NULL);
1997	}
1998	chk_length = ntohs(initack_cp->ch.chunk_length);
1999	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
2000		return (NULL);
2001	}
2002	/*
2003	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
2004	 * "initack_limit" value.  This is because the chk_length field
2005	 * includes the length of the cookie, but the cookie is omitted when
2006	 * the INIT and INIT_ACK are tacked onto the cookie...
2007	 */
2008	initack_limit = offset + cookie_len;
2009
2010	/*
2011	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
2012	 * and popluate
2013	 */
2014
2015	/*
2016	 * Here we do a trick, we set in NULL for the proc/thread argument.
2017	 * We do this since in effect we only use the p argument when the
2018	 * socket is unbound and we must do an implicit bind. Since we are
2019	 * getting a cookie, we cannot be unbound.
2020	 */
2021	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
2022	    ntohl(initack_cp->init.initiate_tag), vrf_id,
2023	    (struct thread *)NULL
2024	    );
2025	if (stcb == NULL) {
2026		struct mbuf *op_err;
2027
2028		/* memory problem? */
2029		SCTPDBG(SCTP_DEBUG_INPUT1,
2030		    "process_cookie_new: no room for another TCB!\n");
2031		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2032
2033		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2034		    sh, op_err, vrf_id, port);
2035		return (NULL);
2036	}
2037	/* get the correct sctp_nets */
2038	if (netp)
2039		*netp = sctp_findnet(stcb, init_src);
2040
2041	asoc = &stcb->asoc;
2042	/* get scope variables out of cookie */
2043	asoc->ipv4_local_scope = cookie->ipv4_scope;
2044	asoc->site_scope = cookie->site_scope;
2045	asoc->local_scope = cookie->local_scope;
2046	asoc->loopback_scope = cookie->loopback_scope;
2047
2048	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
2049	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
2050		struct mbuf *op_err;
2051
2052		/*
2053		 * Houston we have a problem. The EP changed while the
2054		 * cookie was in flight. Only recourse is to abort the
2055		 * association.
2056		 */
2057		atomic_add_int(&stcb->asoc.refcnt, 1);
2058		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2059		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
2060		    sh, op_err, vrf_id, port);
2061#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2062		SCTP_TCB_UNLOCK(stcb);
2063		SCTP_SOCKET_LOCK(so, 1);
2064		SCTP_TCB_LOCK(stcb);
2065#endif
2066		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2067		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2068#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2069		SCTP_SOCKET_UNLOCK(so, 1);
2070#endif
2071		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2072		return (NULL);
2073	}
2074	/* process the INIT-ACK info (my info) */
2075	old_tag = asoc->my_vtag;
2076	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2077	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2078	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
2079	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2080	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2081	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2082	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
2083	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2084	asoc->str_reset_seq_in = asoc->init_seq_number;
2085
2086	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2087
2088	/* process the INIT info (peer's info) */
2089	if (netp)
2090		retval = sctp_process_init(init_cp, stcb, *netp);
2091	else
2092		retval = 0;
2093	if (retval < 0) {
2094		atomic_add_int(&stcb->asoc.refcnt, 1);
2095#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2096		SCTP_TCB_UNLOCK(stcb);
2097		SCTP_SOCKET_LOCK(so, 1);
2098		SCTP_TCB_LOCK(stcb);
2099#endif
2100		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2101#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2102		SCTP_SOCKET_UNLOCK(so, 1);
2103#endif
2104		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2105		return (NULL);
2106	}
2107	/* load all addresses */
2108	if (sctp_load_addresses_from_init(stcb, m, iphlen,
2109	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
2110	    init_src)) {
2111		atomic_add_int(&stcb->asoc.refcnt, 1);
2112#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2113		SCTP_TCB_UNLOCK(stcb);
2114		SCTP_SOCKET_LOCK(so, 1);
2115		SCTP_TCB_LOCK(stcb);
2116#endif
2117		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
2118#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2119		SCTP_SOCKET_UNLOCK(so, 1);
2120#endif
2121		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2122		return (NULL);
2123	}
2124	/*
2125	 * verify any preceding AUTH chunk that was skipped
2126	 */
2127	/* pull the local authentication parameters from the cookie/init-ack */
2128	sctp_auth_get_cookie_params(stcb, m,
2129	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2130	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
2131	if (auth_skipped) {
2132		struct sctp_auth_chunk *auth;
2133
2134		auth = (struct sctp_auth_chunk *)
2135		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
2136		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
2137			/* auth HMAC failed, dump the assoc and packet */
2138			SCTPDBG(SCTP_DEBUG_AUTH1,
2139			    "COOKIE-ECHO: AUTH failed\n");
2140			atomic_add_int(&stcb->asoc.refcnt, 1);
2141#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2142			SCTP_TCB_UNLOCK(stcb);
2143			SCTP_SOCKET_LOCK(so, 1);
2144			SCTP_TCB_LOCK(stcb);
2145#endif
2146			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
2147#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2148			SCTP_SOCKET_UNLOCK(so, 1);
2149#endif
2150			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2151			return (NULL);
2152		} else {
2153			/* remaining chunks checked... good to go */
2154			stcb->asoc.authenticated = 1;
2155		}
2156	}
2157	/* update current state */
2158	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2159	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2160	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2161		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2162		    stcb->sctp_ep, stcb, asoc->primary_destination);
2163	}
2164	sctp_stop_all_cookie_timers(stcb);
2165	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
2166	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2167
2168	/*
2169	 * if we're doing ASCONFs, check to see if we have any new local
2170	 * addresses that need to get added to the peer (eg. addresses
2171	 * changed while cookie echo in flight).  This needs to be done
2172	 * after we go to the OPEN state to do the correct asconf
2173	 * processing. else, make sure we have the correct addresses in our
2174	 * lists
2175	 */
2176
2177	/* warning, we re-use sin, sin6, sa_store here! */
2178	/* pull in local_address (our "from" address) */
2179	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
2180		/* source addr is IPv4 */
2181		sin = (struct sockaddr_in *)initack_src;
2182		memset(sin, 0, sizeof(*sin));
2183		sin->sin_family = AF_INET;
2184		sin->sin_len = sizeof(struct sockaddr_in);
2185		sin->sin_addr.s_addr = cookie->laddress[0];
2186	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
2187		/* source addr is IPv6 */
2188		sin6 = (struct sockaddr_in6 *)initack_src;
2189		memset(sin6, 0, sizeof(*sin6));
2190		sin6->sin6_family = AF_INET6;
2191		sin6->sin6_len = sizeof(struct sockaddr_in6);
2192		sin6->sin6_scope_id = cookie->scope_id;
2193		memcpy(&sin6->sin6_addr, cookie->laddress,
2194		    sizeof(sin6->sin6_addr));
2195	} else {
2196		atomic_add_int(&stcb->asoc.refcnt, 1);
2197#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2198		SCTP_TCB_UNLOCK(stcb);
2199		SCTP_SOCKET_LOCK(so, 1);
2200		SCTP_TCB_LOCK(stcb);
2201#endif
2202		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
2203#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2204		SCTP_SOCKET_UNLOCK(so, 1);
2205#endif
2206		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2207		return (NULL);
2208	}
2209
2210	/* set up to notify upper layer */
2211	*notification = SCTP_NOTIFY_ASSOC_UP;
2212	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2213	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2214	    (inp->sctp_socket->so_qlimit == 0)) {
2215		/*
2216		 * This is an endpoint that called connect() how it got a
2217		 * cookie that is NEW is a bit of a mystery. It must be that
2218		 * the INIT was sent, but before it got there.. a complete
2219		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
2220		 * should have went to the other code.. not here.. oh well..
2221		 * a bit of protection is worth having..
2222		 */
2223		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2224#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2225		atomic_add_int(&stcb->asoc.refcnt, 1);
2226		SCTP_TCB_UNLOCK(stcb);
2227		SCTP_SOCKET_LOCK(so, 1);
2228		SCTP_TCB_LOCK(stcb);
2229		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2230		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2231			SCTP_SOCKET_UNLOCK(so, 1);
2232			return (NULL);
2233		}
2234#endif
2235		soisconnected(stcb->sctp_socket);
2236#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2237		SCTP_SOCKET_UNLOCK(so, 1);
2238#endif
2239	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2240	    (inp->sctp_socket->so_qlimit)) {
2241		/*
2242		 * We don't want to do anything with this one. Since it is
2243		 * the listening guy. The timer will get started for
2244		 * accepted connections in the caller.
2245		 */
2246		;
2247	}
2248	/* since we did not send a HB make sure we don't double things */
2249	if ((netp) && (*netp))
2250		(*netp)->hb_responded = 1;
2251
2252	if (stcb->asoc.sctp_autoclose_ticks &&
2253	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2254		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2255	}
2256	/* calculate the RTT */
2257	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2258	if ((netp) && (*netp)) {
2259		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
2260		    &cookie->time_entered, sctp_align_unsafe_makecopy);
2261	}
2262	/* respond with a COOKIE-ACK */
2263	sctp_send_cookie_ack(stcb);
2264
2265	/*
2266	 * check the address lists for any ASCONFs that need to be sent
2267	 * AFTER the cookie-ack is sent
2268	 */
2269	sctp_check_address_list(stcb, m,
2270	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2271	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2272	    initack_src, cookie->local_scope, cookie->site_scope,
2273	    cookie->ipv4_scope, cookie->loopback_scope);
2274
2275
2276	return (stcb);
2277}
2278
/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.,
 * we NEED to make sure we are not already using the vtag. If so, we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG; no middle box bit!
	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
							    SCTP_BASE_INFO(hashasocmark))];
	LIST_FOREACH(stcb, head, sctp_asocs) {
	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
		       -- SEND ABORT - TRY AGAIN --
		}
	}
*/
2291
2292/*
2293 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2294 * existing (non-NULL) TCB
2295 */
2296static struct mbuf *
2297sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2298    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2299    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2300    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2301    struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
2302{
2303	struct sctp_state_cookie *cookie;
2304	struct sockaddr_in6 sin6;
2305	struct sockaddr_in sin;
2306	struct sctp_tcb *l_stcb = *stcb;
2307	struct sctp_inpcb *l_inp;
2308	struct sockaddr *to;
2309	sctp_assoc_t sac_restart_id;
2310	struct sctp_pcb *ep;
2311	struct mbuf *m_sig;
2312	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2313	uint8_t *sig;
2314	uint8_t cookie_ok = 0;
2315	unsigned int size_of_pkt, sig_offset, cookie_offset;
2316	unsigned int cookie_len;
2317	struct timeval now;
2318	struct timeval time_expires;
2319	struct sockaddr_storage dest_store;
2320	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2321	struct ip *iph;
2322	int notification = 0;
2323	struct sctp_nets *netl;
2324	int had_a_existing_tcb = 0;
2325
2326	SCTPDBG(SCTP_DEBUG_INPUT2,
2327	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2328
2329	if (inp_p == NULL) {
2330		return (NULL);
2331	}
2332	/* First get the destination address setup too. */
2333	iph = mtod(m, struct ip *);
2334	switch (iph->ip_v) {
2335	case IPVERSION:
2336		{
2337			/* its IPv4 */
2338			struct sockaddr_in *lsin;
2339
2340			lsin = (struct sockaddr_in *)(localep_sa);
2341			memset(lsin, 0, sizeof(*lsin));
2342			lsin->sin_family = AF_INET;
2343			lsin->sin_len = sizeof(*lsin);
2344			lsin->sin_port = sh->dest_port;
2345			lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2346			size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2347			break;
2348		}
2349#ifdef INET6
2350	case IPV6_VERSION >> 4:
2351		{
2352			/* its IPv6 */
2353			struct ip6_hdr *ip6;
2354			struct sockaddr_in6 *lsin6;
2355
2356			lsin6 = (struct sockaddr_in6 *)(localep_sa);
2357			memset(lsin6, 0, sizeof(*lsin6));
2358			lsin6->sin6_family = AF_INET6;
2359			lsin6->sin6_len = sizeof(struct sockaddr_in6);
2360			ip6 = mtod(m, struct ip6_hdr *);
2361			lsin6->sin6_port = sh->dest_port;
2362			lsin6->sin6_addr = ip6->ip6_dst;
2363			size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2364			break;
2365		}
2366#endif
2367	default:
2368		return (NULL);
2369	}
2370
2371	cookie = &cp->cookie;
2372	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2373	cookie_len = ntohs(cp->ch.chunk_length);
2374
2375	if ((cookie->peerport != sh->src_port) &&
2376	    (cookie->myport != sh->dest_port) &&
2377	    (cookie->my_vtag != sh->v_tag)) {
2378		/*
2379		 * invalid ports or bad tag.  Note that we always leave the
2380		 * v_tag in the header in network order and when we stored
2381		 * it in the my_vtag slot we also left it in network order.
2382		 * This maintains the match even though it may be in the
2383		 * opposite byte order of the machine :->
2384		 */
2385		return (NULL);
2386	}
2387	if (cookie_len > size_of_pkt ||
2388	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2389	    sizeof(struct sctp_init_chunk) +
2390	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2391		/* cookie too long!  or too small */
2392		return (NULL);
2393	}
2394	/*
2395	 * split off the signature into its own mbuf (since it should not be
2396	 * calculated in the sctp_hmac_m() call).
2397	 */
2398	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2399	if (sig_offset > size_of_pkt) {
2400		/* packet not correct size! */
2401		/* XXX this may already be accounted for earlier... */
2402		return (NULL);
2403	}
2404	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2405	if (m_sig == NULL) {
2406		/* out of memory or ?? */
2407		return (NULL);
2408	}
2409#ifdef SCTP_MBUF_LOGGING
2410	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2411		struct mbuf *mat;
2412
2413		mat = m_sig;
2414		while (mat) {
2415			if (SCTP_BUF_IS_EXTENDED(mat)) {
2416				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2417			}
2418			mat = SCTP_BUF_NEXT(mat);
2419		}
2420	}
2421#endif
2422
2423	/*
2424	 * compute the signature/digest for the cookie
2425	 */
2426	ep = &(*inp_p)->sctp_ep;
2427	l_inp = *inp_p;
2428	if (l_stcb) {
2429		SCTP_TCB_UNLOCK(l_stcb);
2430	}
2431	SCTP_INP_RLOCK(l_inp);
2432	if (l_stcb) {
2433		SCTP_TCB_LOCK(l_stcb);
2434	}
2435	/* which cookie is it? */
2436	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2437	    (ep->current_secret_number != ep->last_secret_number)) {
2438		/* it's the old cookie */
2439		(void)sctp_hmac_m(SCTP_HMAC,
2440		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2441		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2442	} else {
2443		/* it's the current cookie */
2444		(void)sctp_hmac_m(SCTP_HMAC,
2445		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2446		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2447	}
2448	/* get the signature */
2449	SCTP_INP_RUNLOCK(l_inp);
2450	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2451	if (sig == NULL) {
2452		/* couldn't find signature */
2453		sctp_m_freem(m_sig);
2454		return (NULL);
2455	}
2456	/* compare the received digest with the computed digest */
2457	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2458		/* try the old cookie? */
2459		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2460		    (ep->current_secret_number != ep->last_secret_number)) {
2461			/* compute digest with old */
2462			(void)sctp_hmac_m(SCTP_HMAC,
2463			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2464			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2465			/* compare */
2466			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2467				cookie_ok = 1;
2468		}
2469	} else {
2470		cookie_ok = 1;
2471	}
2472
2473	/*
2474	 * Now before we continue we must reconstruct our mbuf so that
2475	 * normal processing of any other chunks will work.
2476	 */
2477	{
2478		struct mbuf *m_at;
2479
2480		m_at = m;
2481		while (SCTP_BUF_NEXT(m_at) != NULL) {
2482			m_at = SCTP_BUF_NEXT(m_at);
2483		}
2484		SCTP_BUF_NEXT(m_at) = m_sig;
2485	}
2486
2487	if (cookie_ok == 0) {
2488		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2489		SCTPDBG(SCTP_DEBUG_INPUT2,
2490		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2491		    (uint32_t) offset, cookie_offset, sig_offset);
2492		return (NULL);
2493	}
2494	/*
2495	 * check the cookie timestamps to be sure it's not stale
2496	 */
2497	(void)SCTP_GETTIME_TIMEVAL(&now);
2498	/* Expire time is in Ticks, so we convert to seconds */
2499	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2500	time_expires.tv_usec = cookie->time_entered.tv_usec;
2501	/*
2502	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2503	 * is undefined.
2504	 */
2505	if (timevalcmp(&now, &time_expires, >)) {
2506		/* cookie is stale! */
2507		struct mbuf *op_err;
2508		struct sctp_stale_cookie_msg *scm;
2509		uint32_t tim;
2510
2511		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2512		    0, M_DONTWAIT, 1, MT_DATA);
2513		if (op_err == NULL) {
2514			/* FOOBAR */
2515			return (NULL);
2516		}
2517		/* Set the len */
2518		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2519		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2520		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2521		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2522		    (sizeof(uint32_t))));
2523		/* seconds to usec */
2524		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2525		/* add in usec */
2526		if (tim == 0)
2527			tim = now.tv_usec - cookie->time_entered.tv_usec;
2528		scm->time_usec = htonl(tim);
2529		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2530		    vrf_id, port);
2531		return (NULL);
2532	}
2533	/*
2534	 * Now we must see with the lookup address if we have an existing
2535	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2536	 * and a INIT collided with us and somewhere the peer sent the
2537	 * cookie on another address besides the single address our assoc
2538	 * had for him. In this case we will have one of the tie-tags set at
2539	 * least AND the address field in the cookie can be used to look it
2540	 * up.
2541	 */
2542	to = NULL;
2543	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2544		memset(&sin6, 0, sizeof(sin6));
2545		sin6.sin6_family = AF_INET6;
2546		sin6.sin6_len = sizeof(sin6);
2547		sin6.sin6_port = sh->src_port;
2548		sin6.sin6_scope_id = cookie->scope_id;
2549		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2550		    sizeof(sin6.sin6_addr.s6_addr));
2551		to = (struct sockaddr *)&sin6;
2552	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2553		memset(&sin, 0, sizeof(sin));
2554		sin.sin_family = AF_INET;
2555		sin.sin_len = sizeof(sin);
2556		sin.sin_port = sh->src_port;
2557		sin.sin_addr.s_addr = cookie->address[0];
2558		to = (struct sockaddr *)&sin;
2559	} else {
2560		/* This should not happen */
2561		return (NULL);
2562	}
2563	if ((*stcb == NULL) && to) {
2564		/* Yep, lets check */
2565		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2566		if (*stcb == NULL) {
2567			/*
2568			 * We should have only got back the same inp. If we
2569			 * got back a different ep we have a problem. The
2570			 * original findep got back l_inp and now
2571			 */
2572			if (l_inp != *inp_p) {
2573				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2574			}
2575		} else {
2576			if (*locked_tcb == NULL) {
2577				/*
2578				 * In this case we found the assoc only
2579				 * after we locked the create lock. This
2580				 * means we are in a colliding case and we
2581				 * must make sure that we unlock the tcb if
2582				 * its one of the cases where we throw away
2583				 * the incoming packets.
2584				 */
2585				*locked_tcb = *stcb;
2586
2587				/*
2588				 * We must also increment the inp ref count
2589				 * since the ref_count flags was set when we
2590				 * did not find the TCB, now we found it
2591				 * which reduces the refcount.. we must
2592				 * raise it back out to balance it all :-)
2593				 */
2594				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2595				if ((*stcb)->sctp_ep != l_inp) {
2596					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2597					    (*stcb)->sctp_ep, l_inp);
2598				}
2599			}
2600		}
2601	}
2602	if (to == NULL) {
2603		return (NULL);
2604	}
2605	cookie_len -= SCTP_SIGNATURE_SIZE;
2606	if (*stcb == NULL) {
2607		/* this is the "normal" case... get a new TCB */
2608		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2609		    cookie_len, *inp_p, netp, to, &notification,
2610		    auth_skipped, auth_offset, auth_len, vrf_id, port);
2611	} else {
2612		/* this is abnormal... cookie-echo on existing TCB */
2613		had_a_existing_tcb = 1;
2614		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2615		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2616		    &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
2617	}
2618
2619	if (*stcb == NULL) {
2620		/* still no TCB... must be bad cookie-echo */
2621		return (NULL);
2622	}
2623	/*
2624	 * Ok, we built an association so confirm the address we sent the
2625	 * INIT-ACK to.
2626	 */
2627	netl = sctp_findnet(*stcb, to);
2628	/*
2629	 * This code should in theory NOT run but
2630	 */
2631	if (netl == NULL) {
2632		/* TSNH! Huh, why do I need to add this address here? */
2633		int ret;
2634
2635		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2636		    SCTP_IN_COOKIE_PROC);
2637		netl = sctp_findnet(*stcb, to);
2638	}
2639	if (netl) {
2640		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2641			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2642			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2643			    netl);
2644			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2645			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2646		}
2647	}
2648	if (*stcb) {
2649		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2650		    *stcb, NULL);
2651	}
2652	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2653		if (!had_a_existing_tcb ||
2654		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2655			/*
2656			 * If we have a NEW cookie or the connect never
2657			 * reached the connected state during collision we
2658			 * must do the TCP accept thing.
2659			 */
2660			struct socket *so, *oso;
2661			struct sctp_inpcb *inp;
2662
2663			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2664				/*
2665				 * For a restart we will keep the same
2666				 * socket, no need to do anything. I THINK!!
2667				 */
2668				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2669				return (m);
2670			}
2671			oso = (*inp_p)->sctp_socket;
2672			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2673			SCTP_TCB_UNLOCK((*stcb));
2674			so = sonewconn(oso, 0
2675			    );
2676			SCTP_TCB_LOCK((*stcb));
2677			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2678
2679			if (so == NULL) {
2680				struct mbuf *op_err;
2681
2682#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2683				struct socket *pcb_so;
2684
2685#endif
2686				/* Too many sockets */
2687				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2688				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2689				sctp_abort_association(*inp_p, NULL, m, iphlen,
2690				    sh, op_err, vrf_id, port);
2691#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2692				pcb_so = SCTP_INP_SO(*inp_p);
2693				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2694				SCTP_TCB_UNLOCK((*stcb));
2695				SCTP_SOCKET_LOCK(pcb_so, 1);
2696				SCTP_TCB_LOCK((*stcb));
2697				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2698#endif
2699				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2700#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2701				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2702#endif
2703				return (NULL);
2704			}
2705			inp = (struct sctp_inpcb *)so->so_pcb;
2706			SCTP_INP_INCR_REF(inp);
2707			/*
2708			 * We add the unbound flag here so that if we get an
2709			 * soabort() before we get the move_pcb done, we
2710			 * will properly cleanup.
2711			 */
2712			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2713			    SCTP_PCB_FLAGS_CONNECTED |
2714			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2715			    SCTP_PCB_FLAGS_UNBOUND |
2716			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2717			    SCTP_PCB_FLAGS_DONT_WAKE);
2718			inp->sctp_features = (*inp_p)->sctp_features;
2719			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2720			inp->sctp_socket = so;
2721			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2722			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2723			inp->sctp_context = (*inp_p)->sctp_context;
2724			inp->inp_starting_point_for_iterator = NULL;
2725			/*
2726			 * copy in the authentication parameters from the
2727			 * original endpoint
2728			 */
2729			if (inp->sctp_ep.local_hmacs)
2730				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2731			inp->sctp_ep.local_hmacs =
2732			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2733			if (inp->sctp_ep.local_auth_chunks)
2734				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2735			inp->sctp_ep.local_auth_chunks =
2736			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2737
2738			/*
2739			 * Now we must move it from one hash table to
2740			 * another and get the tcb in the right place.
2741			 */
2742			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2743
2744			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2745			SCTP_TCB_UNLOCK((*stcb));
2746
2747			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2748			    0);
2749			SCTP_TCB_LOCK((*stcb));
2750			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2751
2752
2753			/*
2754			 * now we must check to see if we were aborted while
2755			 * the move was going on and the lock/unlock
2756			 * happened.
2757			 */
2758			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2759				/*
2760				 * yep it was, we leave the assoc attached
2761				 * to the socket since the sctp_inpcb_free()
2762				 * call will send an abort for us.
2763				 */
2764				SCTP_INP_DECR_REF(inp);
2765				return (NULL);
2766			}
2767			SCTP_INP_DECR_REF(inp);
2768			/* Switch over to the new guy */
2769			*inp_p = inp;
2770			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2771
2772			/*
2773			 * Pull it from the incomplete queue and wake the
2774			 * guy
2775			 */
2776#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2777			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2778			SCTP_TCB_UNLOCK((*stcb));
2779			SCTP_SOCKET_LOCK(so, 1);
2780#endif
2781			soisconnected(so);
2782#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2783			SCTP_TCB_LOCK((*stcb));
2784			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2785			SCTP_SOCKET_UNLOCK(so, 1);
2786#endif
2787			return (m);
2788		}
2789	}
2790	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2791		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2792	}
2793	return (m);
2794}
2795
/*
 * Handle an incoming COOKIE-ACK chunk.  If the association is in the
 * COOKIE_ECHOED state this completes setup: move to OPEN, update the
 * RTO estimate, notify the ULP, mark TCP-model sockets connected,
 * start the heartbeat/autoclose timers, and push out any ASCONFs that
 * queued up while the handshake was in flight.  In every state, stored
 * cookies are tossed and the send timer is restarted if data is still
 * outstanding.  Caller holds the TCB lock.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			/* shutdown was requested mid-handshake; guard it */
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		/*
		 * Only sample the RTT when no error/retransmission
		 * occurred, so Karn's rule is respected.
		 */
		if (asoc->overall_error_count == 0) {
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* TCP-model endpoint: flag connected and wake the socket */
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock order is socket lock before TCB lock, so
			 * drop the TCB lock and hold a refcount so the
			 * TCB cannot disappear while it is unlocked.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket was closed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}
2898
2899static void
2900sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2901    struct sctp_tcb *stcb)
2902{
2903	struct sctp_nets *net;
2904	struct sctp_tmit_chunk *lchk;
2905	uint32_t tsn;
2906
2907	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2908		return;
2909	}
2910	SCTP_STAT_INCR(sctps_recvecne);
2911	tsn = ntohl(cp->tsn);
2912	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2913	/* Also we make sure we disable the nonce_wait */
2914	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2915	if (lchk == NULL) {
2916		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2917	} else {
2918		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2919	}
2920	stcb->asoc.nonce_wait_for_ecne = 0;
2921	stcb->asoc.nonce_sum_check = 0;
2922
2923	/* Find where it was sent, if possible */
2924	net = NULL;
2925	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2926	while (lchk) {
2927		if (lchk->rec.data.TSN_seq == tsn) {
2928			net = lchk->whoTo;
2929			break;
2930		}
2931		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2932			break;
2933		lchk = TAILQ_NEXT(lchk, sctp_next);
2934	}
2935	if (net == NULL)
2936		/* default is we use the primary */
2937		net = stcb->asoc.primary_destination;
2938
2939	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2940		/*
2941		 * JRS - Use the congestion control given in the pluggable
2942		 * CC module
2943		 */
2944		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
2945		/*
2946		 * we reduce once every RTT. So we will only lower cwnd at
2947		 * the next sending seq i.e. the resync_tsn.
2948		 */
2949		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2950	}
2951	/*
2952	 * We always send a CWR this way if our previous one was lost our
2953	 * peer will get an update, or if it is not time again to reduce we
2954	 * still get the cwr to the peer.
2955	 */
2956	sctp_send_cwr(stcb, net, tsn);
2957}
2958
2959static void
2960sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2961{
2962	/*
2963	 * Here we get a CWR from the peer. We must look in the outqueue and
2964	 * make sure that we have a covered ECNE in teh control chunk part.
2965	 * If so remove it.
2966	 */
2967	struct sctp_tmit_chunk *chk;
2968	struct sctp_ecne_chunk *ecne;
2969
2970	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2971		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2972			continue;
2973		}
2974		/*
2975		 * Look for and remove if it is the right TSN. Since there
2976		 * is only ONE ECNE on the control queue at any one time we
2977		 * don't need to worry about more than one!
2978		 */
2979		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2980		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2981		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2982			/* this covers this ECNE, we can remove it */
2983			stcb->asoc.ecn_echo_cnt_onq--;
2984			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2985			    sctp_next);
2986			if (chk->data) {
2987				sctp_m_freem(chk->data);
2988				chk->data = NULL;
2989			}
2990			stcb->asoc.ctrl_queue_cnt--;
2991			sctp_free_a_chunk(stcb, chk);
2992			break;
2993		}
2994	}
2995}
2996
/*
 * Handle an incoming SHUTDOWN-COMPLETE chunk.  Only meaningful in the
 * SHUTDOWN_ACK_SENT state; otherwise it is ignored.  On acceptance:
 * notify the ULP the association is down, report any (unexpected)
 * leftover outbound data, stop the SHUTDOWN-ACK timer, and free the
 * TCB.  Caller holds the TCB lock; this function releases it (either
 * via SCTP_TCB_UNLOCK on the ignore path or via sctp_free_assoc()).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock order is socket lock before TCB lock; hold a refcount so
	 * the TCB cannot vanish while the TCB lock is dropped.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3052
/*
 * Process one chunk descriptor from a received PACKET-DROPPED report.
 * "desc" describes a chunk of ours that was dropped in the network;
 * depending on its type we retransmit it directly, mark it for
 * retransmission, or ignore it.  "flg" carries the report flags
 * (e.g. SCTP_FROM_MIDDLE_BOX).  Returns 0 normally, or -1 when the
 * reflected DATA bytes do not match what we actually sent (the report
 * is then untrustworthy and processing should stop).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
		case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/*
			 * First pass: walk the (TSN-ordered) sent queue
			 * and stop early once we pass where the TSN
			 * should be.
			 */
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
				while (tp1) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
					tp1 = TAILQ_NEXT(tp1, sctp_next);
				}
			}
			if (tp1 == NULL) {
				/* TSN not on the sent queue at all */
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/*
				 * With a closed peer rwnd we do not
				 * retransmit; the two cases only differ
				 * in which statistic is bumped
				 * (endpoint report vs middle box).
				 */
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reflected payload bytes
				 * match what we sent (skip the DATA
				 * chunk header).
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}
				/*
				 * We zero out the nonce so resync not
				 * needed
				 */
				tp1->rec.data.ect_nonce = 0;

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				tp1->sent = SCTP_DATAGRAM_RESEND;
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
			} {
				/*
				 * audit code -- note this block is
				 * unconditional (it follows the if above
				 * as a free-standing compound statement):
				 * recount RESEND-marked chunks and fix up
				 * sent_queue_retran_cnt if it drifted.
				 */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* mark any queued ASCONF for retransmission */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
		/* EY for nr_sacks */
	case SCTP_NR_SELECTIVE_ACK:
		sctp_send_nr_sack(stcb);	/* EY resend the nr-sack */
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			/* mark the queued COOKIE-ECHO for retransmission */
			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3296
3297void
3298sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3299{
3300	int i;
3301	uint16_t temp;
3302
3303	/*
3304	 * We set things to 0xffff since this is the last delivered sequence
3305	 * and we will be sending in 0 after the reset.
3306	 */
3307
3308	if (number_entries) {
3309		for (i = 0; i < number_entries; i++) {
3310			temp = ntohs(list[i]);
3311			if (temp >= stcb->asoc.streamincnt) {
3312				continue;
3313			}
3314			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3315		}
3316	} else {
3317		list = NULL;
3318		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3319			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3320		}
3321	}
3322	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3323}
3324
3325static void
3326sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3327{
3328	int i;
3329
3330	if (number_entries == 0) {
3331		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3332			stcb->asoc.strmout[i].next_sequence_sent = 0;
3333		}
3334	} else if (number_entries) {
3335		for (i = 0; i < number_entries; i++) {
3336			uint16_t temp;
3337
3338			temp = ntohs(list[i]);
3339			if (temp >= stcb->asoc.streamoutcnt) {
3340				/* no such stream */
3341				continue;
3342			}
3343			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3344		}
3345	}
3346	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3347}
3348
3349
/*
 * Locate the outstanding stream-reset OUT request whose request_seq
 * matches "seq" inside the association's queued stream-reset chunk.
 * A reset chunk can carry at most two request parameters; both are
 * checked.  If "bchk" is non-NULL the chunk pointer is returned
 * through it.  Returns NULL when nothing is outstanding or the seq is
 * not found; as a side effect, clears stream_reset_outstanding when
 * the control queue or str_reset pointer shows nothing is pending.
 */
struct sctp_stream_reset_out_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	struct sctp_association *asoc;
	struct sctp_stream_reset_out_req *req;
	struct sctp_stream_reset_out_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* nothing queued at all -> nothing can be outstanding */
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
	r = &req->sr_req;
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	/* step over the first parameter (32-bit padded length) */
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}
3394
3395static void
3396sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3397{
3398	struct sctp_association *asoc;
3399	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3400
3401	if (stcb->asoc.str_reset == NULL) {
3402		return;
3403	}
3404	asoc = &stcb->asoc;
3405
3406	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3407	TAILQ_REMOVE(&asoc->control_send_queue,
3408	    chk,
3409	    sctp_next);
3410	if (chk->data) {
3411		sctp_m_freem(chk->data);
3412		chk->data = NULL;
3413	}
3414	asoc->ctrl_queue_cnt--;
3415	sctp_free_a_chunk(stcb, chk);
3416	/* sa_ignore NO_NULL_CHK */
3417	stcb->asoc.str_reset = NULL;
3418}
3419
3420
/*
 * Process a stream-reset response parameter from the peer.  "seq" is
 * the request sequence the peer is answering and "action" its verdict
 * (e.g. SCTP_STREAM_RESET_PERFORMED).  Matches the response against
 * our outstanding request (OUT / IN / ADD-STREAMS / TSN reset),
 * applies the effects on success, notifies the ULP on failure, and
 * cleans up the queued request chunk once nothing remains
 * outstanding.  Returns 1 only when TSN-reset FWD-TSN processing
 * aborted the association; otherwise 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				/* peer answered our outgoing-stream reset */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_STREAMS) {
				/* Ok we now may have more streams */
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* Put the new streams into effect */
					stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize;
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * Build a synthetic FWD-TSN up to
					 * one before the peer's next TSN so
					 * pending data is flushed through.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* rebase the receive mapping array */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					/*
					 * EY 05/13/08 - nr_sack: to keep
					 * nr_mapping array be consistent
					 * with mapping_array
					 */
					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
						stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
						stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.mapping_array_base_tsn;
						memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
					}
					/* adopt the peer-assigned outgoing TSN */
					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}
3533
/*
 * Handle a peer's stream-reset IN request: the peer asks us to reset
 * some (or all) of OUR outgoing streams.  If the request sequence is
 * current we either deny (truncated request), defer (one of ours is
 * already outstanding), or issue our own OUT request covering the
 * listed streams via "chk".  Off-by-one/two sequences replay the
 * cached result; anything else is answered BAD_SEQNO.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		if (trunc) {
			/* Can't do it, since they exceeded our buffer size  */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/*
			 * Convert the stream list to host byte order in
			 * place before handing it to the OUT request
			 * builder.
			 */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			/* move the reset action back one */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		}
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: replay the most recent result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* older retransmission: replay the prior result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}
3589
/*
 * Handle a TSN reset request from the peer: flush pending data via a
 * locally-built FORWARD-TSN, advance our TSN space, clear the mapping
 * array(s), reset every inbound and outbound stream sequence number,
 * and answer with our new sending_seq and base TSN.  Returns 1 if the
 * synthetic FORWARD-TSN processing aborted the association (caller must
 * stop touching stcb), 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * Expected sequence number: build a local FORWARD-TSN that
		 * moves the cumulative TSN up to everything received so far,
		 * so queued data is delivered/flushed before the reset.
		 */
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			/* association torn down while processing */
			return (1);
		}
		/* jump our receive TSN space forward and start a clean map */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		/*
		 * EY 05/13/08 -nr_sack: to keep nr_mapping array consistent
		 * with mapping array
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
			stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
			stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
		}
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: replay the saved result/snapshot */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* retransmission from two requests back */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}
3665
3666static void
3667sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3668    struct sctp_tmit_chunk *chk,
3669    struct sctp_stream_reset_out_request *req, int trunc)
3670{
3671	uint32_t seq, tsn;
3672	int number_entries, len;
3673	struct sctp_association *asoc = &stcb->asoc;
3674
3675	seq = ntohl(req->request_seq);
3676
3677	/* now if its not a duplicate we process it */
3678	if (asoc->str_reset_seq_in == seq) {
3679		len = ntohs(req->ph.param_length);
3680		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3681		/*
3682		 * the sender is resetting, handle the list issue.. we must
3683		 * a) verify if we can do the reset, if so no problem b) If
3684		 * we can't do the reset we must copy the request. c) queue
3685		 * it, and setup the data in processor to trigger it off
3686		 * when needed and dequeue all the queued data.
3687		 */
3688		tsn = ntohl(req->send_reset_at_tsn);
3689
3690		/* move the reset action back one */
3691		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3692		if (trunc) {
3693			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3694			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3695		} else if ((tsn == asoc->cumulative_tsn) ||
3696		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3697			/* we can do it now */
3698			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3699			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3700			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3701		} else {
3702			/*
3703			 * we must queue it up and thus wait for the TSN's
3704			 * to arrive that are at or before tsn
3705			 */
3706			struct sctp_stream_reset_list *liste;
3707			int siz;
3708
3709			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3710			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3711			    siz, SCTP_M_STRESET);
3712			if (liste == NULL) {
3713				/* gak out of memory */
3714				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3715				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3716				return;
3717			}
3718			liste->tsn = tsn;
3719			liste->number_entries = number_entries;
3720			memcpy(&liste->req, req,
3721			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3722			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3723			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3724			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3725		}
3726		asoc->str_reset_seq_in++;
3727	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3728		/*
3729		 * one seq back, just echo back last action since my
3730		 * response was lost.
3731		 */
3732		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3733	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3734		/*
3735		 * two seq back, just echo back last action since my
3736		 * response was lost.
3737		 */
3738		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3739	} else {
3740		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3741	}
3742}
3743
3744static void
3745sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3746    struct sctp_stream_reset_add_strm *str_add)
3747{
3748	/*
3749	 * Peer is requesting to add more streams. If its within our
3750	 * max-streams we will allow it.
3751	 */
3752	uint16_t num_stream, i;
3753	uint32_t seq;
3754	struct sctp_association *asoc = &stcb->asoc;
3755	struct sctp_queued_to_read *ctl;
3756
3757	/* Get the number. */
3758	seq = ntohl(str_add->request_seq);
3759	num_stream = ntohs(str_add->number_of_streams);
3760	/* Now what would be the new total? */
3761	if (asoc->str_reset_seq_in == seq) {
3762		num_stream += stcb->asoc.streamincnt;
3763		if (num_stream > stcb->asoc.max_inbound_streams) {
3764			/* We must reject it they ask for to many */
3765	denied:
3766			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3767			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3768			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3769		} else {
3770			/* Ok, we can do that :-) */
3771			struct sctp_stream_in *oldstrm;
3772
3773			/* save off the old */
3774			oldstrm = stcb->asoc.strmin;
3775			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
3776			    (num_stream * sizeof(struct sctp_stream_in)),
3777			    SCTP_M_STRMI);
3778			if (stcb->asoc.strmin == NULL) {
3779				stcb->asoc.strmin = oldstrm;
3780				goto denied;
3781			}
3782			/* copy off the old data */
3783			for (i = 0; i < stcb->asoc.streamincnt; i++) {
3784				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3785				stcb->asoc.strmin[i].stream_no = i;
3786				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
3787				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
3788				/* now anything on those queues? */
3789				while (TAILQ_EMPTY(&oldstrm[i].inqueue) == 0) {
3790					ctl = TAILQ_FIRST(&oldstrm[i].inqueue);
3791					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
3792					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
3793				}
3794			}
3795			/* Init the new streams */
3796			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
3797				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3798				stcb->asoc.strmin[i].stream_no = i;
3799				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3800				stcb->asoc.strmin[i].delivery_started = 0;
3801			}
3802			SCTP_FREE(oldstrm, SCTP_M_STRMI);
3803			/* update the size */
3804			stcb->asoc.streamincnt = num_stream;
3805			/* Send the ack */
3806			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3807			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3808			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3809			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb,
3810			    (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED);
3811		}
3812	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3813		/*
3814		 * one seq back, just echo back last action since my
3815		 * response was lost.
3816		 */
3817		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3818	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3819		/*
3820		 * two seq back, just echo back last action since my
3821		 * response was lost.
3822		 */
3823		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3824	} else {
3825		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3826
3827	}
3828}
3829
/*
 * Parse a STREAM-RESET chunk: walk its embedded parameters (outgoing /
 * incoming / TSN reset requests, add-stream requests and responses),
 * dispatch each to its handler, and build one reply chunk that is queued
 * on the association's control send queue.  Returns 1 when a handler
 * reported a fatal condition while processing (e.g. the TSN handler's
 * forward-TSN aborted the association), 0 otherwise.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
	static int
	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
        struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;	/* scratch for a bare param header */
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];	/* scratch for a full param */

	uint32_t seq;
	int num_req = 0;	/* requests seen; if none, no reply is sent */
	int trunc = 0;		/* current param did not fit in cstore */
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/* common exit: discard the reply chunk (and its mbuf) */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = stcb->asoc.primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* walk every parameter embedded in the chunk */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* pull up to one buffer's worth; more than that is truncated */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > (int)sizeof(cstore)) {
			trunc = 1;
		} else {
			trunc = 0;
		}

		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;

			req_in = (struct sctp_stream_reset_in_request *)ph;

			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;

			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* association was aborted during processing */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
3978
3979/*
3980 * Handle a router or endpoints report of a packet loss, there are two ways
3981 * to handle this, either we get the whole packet and must disect it
3982 * ourselves (possibly with truncation and or corruption) or it is a summary
3983 * from a middle box that did the disectting for us.
3984 */
3985static void
3986sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3987    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3988{
3989	uint32_t bottle_bw, on_queue;
3990	uint16_t trunc_len;
3991	unsigned int chlen;
3992	unsigned int at;
3993	struct sctp_chunk_desc desc;
3994	struct sctp_chunkhdr *ch;
3995
3996	chlen = ntohs(cp->ch.chunk_length);
3997	chlen -= sizeof(struct sctp_pktdrop_chunk);
3998	/* XXX possible chlen underflow */
3999	if (chlen == 0) {
4000		ch = NULL;
4001		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4002			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4003	} else {
4004		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4005		chlen -= sizeof(struct sctphdr);
4006		/* XXX possible chlen underflow */
4007		memset(&desc, 0, sizeof(desc));
4008	}
4009	trunc_len = (uint16_t) ntohs(cp->trunc_len);
4010	if (trunc_len > limit) {
4011		trunc_len = limit;
4012	}
4013	/* now the chunks themselves */
4014	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4015		desc.chunk_type = ch->chunk_type;
4016		/* get amount we need to move */
4017		at = ntohs(ch->chunk_length);
4018		if (at < sizeof(struct sctp_chunkhdr)) {
4019			/* corrupt chunk, maybe at the end? */
4020			SCTP_STAT_INCR(sctps_pdrpcrupt);
4021			break;
4022		}
4023		if (trunc_len == 0) {
4024			/* we are supposed to have all of it */
4025			if (at > chlen) {
4026				/* corrupt skip it */
4027				SCTP_STAT_INCR(sctps_pdrpcrupt);
4028				break;
4029			}
4030		} else {
4031			/* is there enough of it left ? */
4032			if (desc.chunk_type == SCTP_DATA) {
4033				if (chlen < (sizeof(struct sctp_data_chunk) +
4034				    sizeof(desc.data_bytes))) {
4035					break;
4036				}
4037			} else {
4038				if (chlen < sizeof(struct sctp_chunkhdr)) {
4039					break;
4040				}
4041			}
4042		}
4043		if (desc.chunk_type == SCTP_DATA) {
4044			/* can we get out the tsn? */
4045			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4046				SCTP_STAT_INCR(sctps_pdrpmbda);
4047
4048			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4049				/* yep */
4050				struct sctp_data_chunk *dcp;
4051				uint8_t *ddp;
4052				unsigned int iii;
4053
4054				dcp = (struct sctp_data_chunk *)ch;
4055				ddp = (uint8_t *) (dcp + 1);
4056				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4057					desc.data_bytes[iii] = ddp[iii];
4058				}
4059				desc.tsn_ifany = dcp->dp.tsn;
4060			} else {
4061				/* nope we are done. */
4062				SCTP_STAT_INCR(sctps_pdrpnedat);
4063				break;
4064			}
4065		} else {
4066			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4067				SCTP_STAT_INCR(sctps_pdrpmbct);
4068		}
4069
4070		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4071			SCTP_STAT_INCR(sctps_pdrppdbrk);
4072			break;
4073		}
4074		if (SCTP_SIZE32(at) > chlen) {
4075			break;
4076		}
4077		chlen -= SCTP_SIZE32(at);
4078		if (chlen < sizeof(struct sctp_chunkhdr)) {
4079			/* done, none left */
4080			break;
4081		}
4082		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4083	}
4084	/* Now update any rwnd --- possibly */
4085	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4086		/* From a peer, we get a rwnd report */
4087		uint32_t a_rwnd;
4088
4089		SCTP_STAT_INCR(sctps_pdrpfehos);
4090
4091		bottle_bw = ntohl(cp->bottle_bw);
4092		on_queue = ntohl(cp->current_onq);
4093		if (bottle_bw && on_queue) {
4094			/* a rwnd report is in here */
4095			if (bottle_bw > on_queue)
4096				a_rwnd = bottle_bw - on_queue;
4097			else
4098				a_rwnd = 0;
4099
4100			if (a_rwnd == 0)
4101				stcb->asoc.peers_rwnd = 0;
4102			else {
4103				if (a_rwnd > stcb->asoc.total_flight) {
4104					stcb->asoc.peers_rwnd =
4105					    a_rwnd - stcb->asoc.total_flight;
4106				} else {
4107					stcb->asoc.peers_rwnd = 0;
4108				}
4109				if (stcb->asoc.peers_rwnd <
4110				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4111					/* SWS sender side engages */
4112					stcb->asoc.peers_rwnd = 0;
4113				}
4114			}
4115		}
4116	} else {
4117		SCTP_STAT_INCR(sctps_pdrpfmbox);
4118	}
4119
4120	/* now middle boxes in sat networks get a cwnd bump */
4121	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4122	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4123	    (stcb->asoc.sat_network)) {
4124		/*
4125		 * This is debateable but for sat networks it makes sense
4126		 * Note if a T3 timer has went off, we will prohibit any
4127		 * changes to cwnd until we exit the t3 loss recovery.
4128		 */
4129		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4130		    net, cp, &bottle_bw, &on_queue);
4131	}
4132}
4133
4134/*
4135 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4136 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4137 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4138 * length of the complete packet outputs: - length: modified to remaining
4139 * length after control processing - netp: modified to new sctp_nets after
4140 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4141 * bad packet,...) otherwise return the tcb for this packet
4142 */
4143#ifdef __GNUC__
4144__attribute__((noinline))
4145#endif
4146	static struct sctp_tcb *
4147	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4148             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4149             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4150             uint32_t vrf_id, uint16_t port)
4151{
4152	struct sctp_association *asoc;
4153	uint32_t vtag_in;
4154	int num_chunks = 0;	/* number of control chunks processed */
4155	uint32_t chk_length;
4156	int ret;
4157	int abort_no_unlock = 0;
4158
4159	/*
4160	 * How big should this be, and should it be alloc'd? Lets try the
4161	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4162	 * until we get into jumbo grams and such..
4163	 */
4164	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4165	struct sctp_tcb *locked_tcb = stcb;
4166	int got_auth = 0;
4167	uint32_t auth_offset = 0, auth_len = 0;
4168	int auth_skipped = 0;
4169	int asconf_cnt = 0;
4170
4171#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4172	struct socket *so;
4173
4174#endif
4175
4176	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4177	    iphlen, *offset, length, stcb);
4178
4179	/* validate chunk header length... */
4180	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4181		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4182		    ntohs(ch->chunk_length));
4183		if (locked_tcb) {
4184			SCTP_TCB_UNLOCK(locked_tcb);
4185		}
4186		return (NULL);
4187	}
4188	/*
4189	 * validate the verification tag
4190	 */
4191	vtag_in = ntohl(sh->v_tag);
4192
4193	if (locked_tcb) {
4194		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4195	}
4196	if (ch->chunk_type == SCTP_INITIATION) {
4197		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4198		    ntohs(ch->chunk_length), vtag_in);
4199		if (vtag_in != 0) {
4200			/* protocol error- silently discard... */
4201			SCTP_STAT_INCR(sctps_badvtag);
4202			if (locked_tcb) {
4203				SCTP_TCB_UNLOCK(locked_tcb);
4204			}
4205			return (NULL);
4206		}
4207	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4208		/*
4209		 * If there is no stcb, skip the AUTH chunk and process
4210		 * later after a stcb is found (to validate the lookup was
4211		 * valid.
4212		 */
4213		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4214		    (stcb == NULL) &&
4215		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4216			/* save this chunk for later processing */
4217			auth_skipped = 1;
4218			auth_offset = *offset;
4219			auth_len = ntohs(ch->chunk_length);
4220
4221			/* (temporarily) move past this chunk */
4222			*offset += SCTP_SIZE32(auth_len);
4223			if (*offset >= length) {
4224				/* no more data left in the mbuf chain */
4225				*offset = length;
4226				if (locked_tcb) {
4227					SCTP_TCB_UNLOCK(locked_tcb);
4228				}
4229				return (NULL);
4230			}
4231			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4232			    sizeof(struct sctp_chunkhdr), chunk_buf);
4233		}
4234		if (ch == NULL) {
4235			/* Help */
4236			*offset = length;
4237			if (locked_tcb) {
4238				SCTP_TCB_UNLOCK(locked_tcb);
4239			}
4240			return (NULL);
4241		}
4242		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4243			goto process_control_chunks;
4244		}
4245		/*
4246		 * first check if it's an ASCONF with an unknown src addr we
4247		 * need to look inside to find the association
4248		 */
4249		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4250			struct sctp_chunkhdr *asconf_ch = ch;
4251			uint32_t asconf_offset = 0, asconf_len = 0;
4252
4253			/* inp's refcount may be reduced */
4254			SCTP_INP_INCR_REF(inp);
4255
4256			asconf_offset = *offset;
4257			do {
4258				asconf_len = ntohs(asconf_ch->chunk_length);
4259				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4260					break;
4261				stcb = sctp_findassociation_ep_asconf(m, iphlen,
4262				    *offset, sh, &inp, netp, vrf_id);
4263				if (stcb != NULL)
4264					break;
4265				asconf_offset += SCTP_SIZE32(asconf_len);
4266				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4267				    sizeof(struct sctp_chunkhdr), chunk_buf);
4268			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4269			if (stcb == NULL) {
4270				/*
4271				 * reduce inp's refcount if not reduced in
4272				 * sctp_findassociation_ep_asconf().
4273				 */
4274				SCTP_INP_DECR_REF(inp);
4275			} else {
4276				locked_tcb = stcb;
4277			}
4278
4279			/* now go back and verify any auth chunk to be sure */
4280			if (auth_skipped && (stcb != NULL)) {
4281				struct sctp_auth_chunk *auth;
4282
4283				auth = (struct sctp_auth_chunk *)
4284				    sctp_m_getptr(m, auth_offset,
4285				    auth_len, chunk_buf);
4286				got_auth = 1;
4287				auth_skipped = 0;
4288				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4289				    auth_offset)) {
4290					/* auth HMAC failed so dump it */
4291					*offset = length;
4292					if (locked_tcb) {
4293						SCTP_TCB_UNLOCK(locked_tcb);
4294					}
4295					return (NULL);
4296				} else {
4297					/* remaining chunks are HMAC checked */
4298					stcb->asoc.authenticated = 1;
4299				}
4300			}
4301		}
4302		if (stcb == NULL) {
4303			/* no association, so it's out of the blue... */
4304			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
4305			    vrf_id, port);
4306			*offset = length;
4307			if (locked_tcb) {
4308				SCTP_TCB_UNLOCK(locked_tcb);
4309			}
4310			return (NULL);
4311		}
4312		asoc = &stcb->asoc;
4313		/* ABORT and SHUTDOWN can use either v_tag... */
4314		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4315		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4316		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4317			if ((vtag_in == asoc->my_vtag) ||
4318			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4319			    (vtag_in == asoc->peer_vtag))) {
4320				/* this is valid */
4321			} else {
4322				/* drop this packet... */
4323				SCTP_STAT_INCR(sctps_badvtag);
4324				if (locked_tcb) {
4325					SCTP_TCB_UNLOCK(locked_tcb);
4326				}
4327				return (NULL);
4328			}
4329		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4330			if (vtag_in != asoc->my_vtag) {
4331				/*
4332				 * this could be a stale SHUTDOWN-ACK or the
4333				 * peer never got the SHUTDOWN-COMPLETE and
4334				 * is still hung; we have started a new asoc
4335				 * but it won't complete until the shutdown
4336				 * is completed
4337				 */
4338				if (locked_tcb) {
4339					SCTP_TCB_UNLOCK(locked_tcb);
4340				}
4341				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
4342				    NULL, vrf_id, port);
4343				return (NULL);
4344			}
4345		} else {
4346			/* for all other chunks, vtag must match */
4347			if (vtag_in != asoc->my_vtag) {
4348				/* invalid vtag... */
4349				SCTPDBG(SCTP_DEBUG_INPUT3,
4350				    "invalid vtag: %xh, expect %xh\n",
4351				    vtag_in, asoc->my_vtag);
4352				SCTP_STAT_INCR(sctps_badvtag);
4353				if (locked_tcb) {
4354					SCTP_TCB_UNLOCK(locked_tcb);
4355				}
4356				*offset = length;
4357				return (NULL);
4358			}
4359		}
4360	}			/* end if !SCTP_COOKIE_ECHO */
4361	/*
4362	 * process all control chunks...
4363	 */
4364	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4365	/* EY */
4366	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4367	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4368	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4369		/* implied cookie-ack.. we must have lost the ack */
4370		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4371			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4372			    stcb->asoc.overall_error_count,
4373			    0,
4374			    SCTP_FROM_SCTP_INPUT,
4375			    __LINE__);
4376		}
4377		stcb->asoc.overall_error_count = 0;
4378		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4379		    *netp);
4380	}
4381process_control_chunks:
4382	while (IS_SCTP_CONTROL(ch)) {
4383		/* validate chunk length */
4384		chk_length = ntohs(ch->chunk_length);
4385		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4386		    ch->chunk_type, chk_length);
4387		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4388		if (chk_length < sizeof(*ch) ||
4389		    (*offset + (int)chk_length) > length) {
4390			*offset = length;
4391			if (locked_tcb) {
4392				SCTP_TCB_UNLOCK(locked_tcb);
4393			}
4394			return (NULL);
4395		}
4396		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4397		/*
4398		 * INIT-ACK only gets the init ack "header" portion only
4399		 * because we don't have to process the peer's COOKIE. All
4400		 * others get a complete chunk.
4401		 */
4402		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4403		    (ch->chunk_type == SCTP_INITIATION)) {
4404			/* get an init-ack chunk */
4405			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4406			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4407			if (ch == NULL) {
4408				*offset = length;
4409				if (locked_tcb) {
4410					SCTP_TCB_UNLOCK(locked_tcb);
4411				}
4412				return (NULL);
4413			}
4414		} else {
4415			/* For cookies and all other chunks. */
4416			if (chk_length > sizeof(chunk_buf)) {
4417				/*
4418				 * use just the size of the chunk buffer so
4419				 * the front part of our chunks fit in
4420				 * contiguous space up to the chunk buffer
4421				 * size (508 bytes). For chunks that need to
4422				 * get more than that they must use the
4423				 * sctp_m_getptr() function or other means
4424				 * (e.g. know how to parse mbuf chains).
4425				 * Cookies do this already.
4426				 */
4427				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4428				    (sizeof(chunk_buf) - 4),
4429				    chunk_buf);
4430				if (ch == NULL) {
4431					*offset = length;
4432					if (locked_tcb) {
4433						SCTP_TCB_UNLOCK(locked_tcb);
4434					}
4435					return (NULL);
4436				}
4437			} else {
4438				/* We can fit it all */
4439				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4440				    chk_length, chunk_buf);
4441				if (ch == NULL) {
4442					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4443					*offset = length;
4444					if (locked_tcb) {
4445						SCTP_TCB_UNLOCK(locked_tcb);
4446					}
4447					return (NULL);
4448				}
4449			}
4450		}
4451		num_chunks++;
4452		/* Save off the last place we got a control from */
4453		if (stcb != NULL) {
4454			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4455				/*
4456				 * allow last_control to be NULL if
4457				 * ASCONF... ASCONF processing will find the
4458				 * right net later
4459				 */
4460				if ((netp != NULL) && (*netp != NULL))
4461					stcb->asoc.last_control_chunk_from = *netp;
4462			}
4463		}
4464#ifdef SCTP_AUDITING_ENABLED
4465		sctp_audit_log(0xB0, ch->chunk_type);
4466#endif
4467
4468		/* check to see if this chunk required auth, but isn't */
4469		if ((stcb != NULL) &&
4470		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4471		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4472		    !stcb->asoc.authenticated) {
4473			/* "silently" ignore */
4474			SCTP_STAT_INCR(sctps_recvauthmissing);
4475			goto next_chunk;
4476		}
4477		switch (ch->chunk_type) {
4478		case SCTP_INITIATION:
4479			/* must be first and only chunk */
4480			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4481			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4482				/* We are not interested anymore? */
4483				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4484					/*
4485					 * collision case where we are
4486					 * sending to them too
4487					 */
4488					;
4489				} else {
4490					if (locked_tcb) {
4491						SCTP_TCB_UNLOCK(locked_tcb);
4492					}
4493					*offset = length;
4494					return (NULL);
4495				}
4496			}
4497			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4498			    (num_chunks > 1) ||
4499			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4500				*offset = length;
4501				if (locked_tcb) {
4502					SCTP_TCB_UNLOCK(locked_tcb);
4503				}
4504				return (NULL);
4505			}
4506			if ((stcb != NULL) &&
4507			    (SCTP_GET_STATE(&stcb->asoc) ==
4508			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4509				sctp_send_shutdown_ack(stcb,
4510				    stcb->asoc.primary_destination);
4511				*offset = length;
4512				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4513				if (locked_tcb) {
4514					SCTP_TCB_UNLOCK(locked_tcb);
4515				}
4516				return (NULL);
4517			}
4518			if (netp) {
4519				sctp_handle_init(m, iphlen, *offset, sh,
4520				    (struct sctp_init_chunk *)ch, inp,
4521				    stcb, *netp, &abort_no_unlock, vrf_id, port);
4522			}
4523			if (abort_no_unlock)
4524				return (NULL);
4525
4526			*offset = length;
4527			if (locked_tcb) {
4528				SCTP_TCB_UNLOCK(locked_tcb);
4529			}
4530			return (NULL);
4531			break;
4532		case SCTP_PAD_CHUNK:
4533			break;
4534		case SCTP_INITIATION_ACK:
4535			/* must be first and only chunk */
4536			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4537			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4538				/* We are not interested anymore */
4539				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4540					;
4541				} else {
4542					if (locked_tcb) {
4543						SCTP_TCB_UNLOCK(locked_tcb);
4544					}
4545					*offset = length;
4546					if (stcb) {
4547#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4548						so = SCTP_INP_SO(inp);
4549						atomic_add_int(&stcb->asoc.refcnt, 1);
4550						SCTP_TCB_UNLOCK(stcb);
4551						SCTP_SOCKET_LOCK(so, 1);
4552						SCTP_TCB_LOCK(stcb);
4553						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4554#endif
4555						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4556#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4557						SCTP_SOCKET_UNLOCK(so, 1);
4558#endif
4559					}
4560					return (NULL);
4561				}
4562			}
4563			if ((num_chunks > 1) ||
4564			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4565				*offset = length;
4566				if (locked_tcb) {
4567					SCTP_TCB_UNLOCK(locked_tcb);
4568				}
4569				return (NULL);
4570			}
4571			if ((netp) && (*netp)) {
4572				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4573				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4574			} else {
4575				ret = -1;
4576			}
4577			/*
4578			 * Special case, I must call the output routine to
4579			 * get the cookie echoed
4580			 */
4581			if (abort_no_unlock)
4582				return (NULL);
4583
4584			if ((stcb) && ret == 0)
4585				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4586			*offset = length;
4587			if (locked_tcb) {
4588				SCTP_TCB_UNLOCK(locked_tcb);
4589			}
4590			return (NULL);
4591			break;
4592		case SCTP_SELECTIVE_ACK:
4593			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4594			SCTP_STAT_INCR(sctps_recvsacks);
4595			{
4596				struct sctp_sack_chunk *sack;
4597				int abort_now = 0;
4598				uint32_t a_rwnd, cum_ack;
4599				uint16_t num_seg;
4600				int nonce_sum_flag;
4601
4602				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
4603					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
4604					*offset = length;
4605					if (locked_tcb) {
4606						SCTP_TCB_UNLOCK(locked_tcb);
4607					}
4608					return (NULL);
4609				}
4610				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4611					/*-
4612					 * If we have sent a shutdown-ack, we will pay no
4613					 * attention to a sack sent in to us since
4614					 * we don't care anymore.
4615					 */
4616					break;
4617				}
4618				sack = (struct sctp_sack_chunk *)ch;
4619				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
4620				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4621				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4622				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4623				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4624				    cum_ack,
4625				    num_seg,
4626				    a_rwnd
4627				    );
4628				stcb->asoc.seen_a_sack_this_pkt = 1;
4629				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4630				    (num_seg == 0) &&
4631				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4632				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4633				    (stcb->asoc.saw_sack_with_frags == 0) &&
4634				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4635				    ) {
4636					/*
4637					 * We have a SIMPLE sack having no
4638					 * prior segments and data on sent
4639					 * queue to be acked.. Use the
4640					 * faster path sack processing. We
4641					 * also allow window update sacks
4642					 * with no missing segments to go
4643					 * this way too.
4644					 */
4645					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4646					    &abort_now);
4647				} else {
4648					if (netp && *netp)
4649						sctp_handle_sack(m, *offset,
4650						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
4651				}
4652				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4653				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4654				    (stcb->asoc.stream_queue_cnt == 0)) {
4655					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4656				}
4657				if (abort_now) {
4658					/* ABORT signal from sack processing */
4659					*offset = length;
4660					return (NULL);
4661				}
4662			}
4663			break;
4664			/*
4665			 * EY - nr_sack:  If the received chunk is an
4666			 * nr_sack chunk
4667			 */
4668		case SCTP_NR_SELECTIVE_ACK:
4669			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4670			SCTP_STAT_INCR(sctps_recvsacks);
4671			{
4672				struct sctp_nr_sack_chunk *nr_sack;
4673				int abort_now = 0;
4674				uint32_t a_rwnd, cum_ack;
4675				uint16_t num_seg, num_nr_seg;
4676				int nonce_sum_flag;
4677
4678				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_nr_sack_chunk))) {
4679					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on nr_sack chunk, too small\n");
4680			ignore_nr_sack:
4681					*offset = length;
4682					if (locked_tcb) {
4683						SCTP_TCB_UNLOCK(locked_tcb);
4684					}
4685					return (NULL);
4686				}
4687				/*
4688				 * EY nr_sacks have not been negotiated but
4689				 * the peer end sent an nr_sack, silently
4690				 * discard the chunk
4691				 */
4692				if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)) {
4693					goto unknown_chunk;
4694				}
4695				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4696					/*-
4697					 * If we have sent a shutdown-ack, we will pay no
4698					 * attention to a sack sent in to us since
4699					 * we don't care anymore.
4700					 */
4701					goto ignore_nr_sack;
4702				}
4703				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4704				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
4705
4706				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4707				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4708				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4709				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4710				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4711				    cum_ack,
4712				    num_seg,
4713				    a_rwnd
4714				    );
4715				stcb->asoc.seen_a_sack_this_pkt = 1;
4716				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4717				    (num_seg == 0) &&
4718				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4719				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4720				    (stcb->asoc.saw_sack_with_frags == 0) &&
4721				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4722				    ) {
4723					/*
4724					 * We have a SIMPLE sack having no
4725					 * prior segments and data on sent
4726					 * queue to be acked.. Use the
4727					 * faster path sack processing. We
4728					 * also allow window update sacks
4729					 * with no missing segments to go
4730					 * this way too.
4731					 */
4732					sctp_express_handle_nr_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4733					    &abort_now);
4734				} else {
4735					if (netp && *netp)
4736						sctp_handle_nr_sack(m, *offset,
4737						    nr_sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
4738				}
4739				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4740				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4741				    (stcb->asoc.stream_queue_cnt == 0)) {
4742					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4743				}
4744				if (abort_now) {
4745					/* ABORT signal from sack processing */
4746					*offset = length;
4747					return (NULL);
4748				}
4749			}
4750			break;
4751
4752		case SCTP_HEARTBEAT_REQUEST:
4753			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4754			if ((stcb) && netp && *netp) {
4755				SCTP_STAT_INCR(sctps_recvheartbeat);
4756				sctp_send_heartbeat_ack(stcb, m, *offset,
4757				    chk_length, *netp);
4758
4759				/* He's alive so give him credit */
4760				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4761					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4762					    stcb->asoc.overall_error_count,
4763					    0,
4764					    SCTP_FROM_SCTP_INPUT,
4765					    __LINE__);
4766				}
4767				stcb->asoc.overall_error_count = 0;
4768			}
4769			break;
4770		case SCTP_HEARTBEAT_ACK:
4771			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4772			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4773				/* Its not ours */
4774				*offset = length;
4775				if (locked_tcb) {
4776					SCTP_TCB_UNLOCK(locked_tcb);
4777				}
4778				return (NULL);
4779			}
4780			/* He's alive so give him credit */
4781			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4782				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4783				    stcb->asoc.overall_error_count,
4784				    0,
4785				    SCTP_FROM_SCTP_INPUT,
4786				    __LINE__);
4787			}
4788			stcb->asoc.overall_error_count = 0;
4789			SCTP_STAT_INCR(sctps_recvheartbeatack);
4790			if (netp && *netp)
4791				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4792				    stcb, *netp);
4793			break;
4794		case SCTP_ABORT_ASSOCIATION:
4795			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4796			    stcb);
4797			if ((stcb) && netp && *netp)
4798				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4799				    stcb, *netp);
4800			*offset = length;
4801			return (NULL);
4802			break;
4803		case SCTP_SHUTDOWN:
4804			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4805			    stcb);
4806			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4807				*offset = length;
4808				if (locked_tcb) {
4809					SCTP_TCB_UNLOCK(locked_tcb);
4810				}
4811				return (NULL);
4812			}
4813			if (netp && *netp) {
4814				int abort_flag = 0;
4815
4816				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4817				    stcb, *netp, &abort_flag);
4818				if (abort_flag) {
4819					*offset = length;
4820					return (NULL);
4821				}
4822			}
4823			break;
4824		case SCTP_SHUTDOWN_ACK:
4825			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4826			if ((stcb) && (netp) && (*netp))
4827				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4828			*offset = length;
4829			return (NULL);
4830			break;
4831
4832		case SCTP_OPERATION_ERROR:
4833			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4834			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4835
4836				*offset = length;
4837				return (NULL);
4838			}
4839			break;
4840		case SCTP_COOKIE_ECHO:
4841			SCTPDBG(SCTP_DEBUG_INPUT3,
4842			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4843			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4844				;
4845			} else {
4846				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4847					/* We are not interested anymore */
4848					*offset = length;
4849					return (NULL);
4850				}
4851			}
4852			/*
4853			 * First are we accepting? We do this again here
4854			 * sincen it is possible that a previous endpoint
4855			 * WAS listening responded to a INIT-ACK and then
4856			 * closed. We opened and bound.. and are now no
4857			 * longer listening.
4858			 */
4859
4860			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
4861				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4862				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4863					struct mbuf *oper;
4864					struct sctp_paramhdr *phdr;
4865
4866					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4867					    0, M_DONTWAIT, 1, MT_DATA);
4868					if (oper) {
4869						SCTP_BUF_LEN(oper) =
4870						    sizeof(struct sctp_paramhdr);
4871						phdr = mtod(oper,
4872						    struct sctp_paramhdr *);
4873						phdr->param_type =
4874						    htons(SCTP_CAUSE_OUT_OF_RESC);
4875						phdr->param_length =
4876						    htons(sizeof(struct sctp_paramhdr));
4877					}
4878					sctp_abort_association(inp, stcb, m,
4879					    iphlen, sh, oper, vrf_id, port);
4880				}
4881				*offset = length;
4882				return (NULL);
4883			} else {
4884				struct mbuf *ret_buf;
4885				struct sctp_inpcb *linp;
4886
4887				if (stcb) {
4888					linp = NULL;
4889				} else {
4890					linp = inp;
4891				}
4892
4893				if (linp) {
4894					SCTP_ASOC_CREATE_LOCK(linp);
4895				}
4896				if (netp) {
4897					ret_buf =
4898					    sctp_handle_cookie_echo(m, iphlen,
4899					    *offset, sh,
4900					    (struct sctp_cookie_echo_chunk *)ch,
4901					    &inp, &stcb, netp,
4902					    auth_skipped,
4903					    auth_offset,
4904					    auth_len,
4905					    &locked_tcb,
4906					    vrf_id,
4907					    port);
4908				} else {
4909					ret_buf = NULL;
4910				}
4911				if (linp) {
4912					SCTP_ASOC_CREATE_UNLOCK(linp);
4913				}
4914				if (ret_buf == NULL) {
4915					if (locked_tcb) {
4916						SCTP_TCB_UNLOCK(locked_tcb);
4917					}
4918					SCTPDBG(SCTP_DEBUG_INPUT3,
4919					    "GAK, null buffer\n");
4920					auth_skipped = 0;
4921					*offset = length;
4922					return (NULL);
4923				}
4924				/* if AUTH skipped, see if it verified... */
4925				if (auth_skipped) {
4926					got_auth = 1;
4927					auth_skipped = 0;
4928				}
4929				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4930					/*
4931					 * Restart the timer if we have
4932					 * pending data
4933					 */
4934					struct sctp_tmit_chunk *chk;
4935
4936					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4937					if (chk) {
4938						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4939						    stcb->sctp_ep, stcb,
4940						    chk->whoTo);
4941					}
4942				}
4943			}
4944			break;
4945		case SCTP_COOKIE_ACK:
4946			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4947			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4948				if (locked_tcb) {
4949					SCTP_TCB_UNLOCK(locked_tcb);
4950				}
4951				return (NULL);
4952			}
4953			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4954				/* We are not interested anymore */
4955				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4956					;
4957				} else if (stcb) {
4958#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4959					so = SCTP_INP_SO(inp);
4960					atomic_add_int(&stcb->asoc.refcnt, 1);
4961					SCTP_TCB_UNLOCK(stcb);
4962					SCTP_SOCKET_LOCK(so, 1);
4963					SCTP_TCB_LOCK(stcb);
4964					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4965#endif
4966					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4967#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4968					SCTP_SOCKET_UNLOCK(so, 1);
4969#endif
4970					*offset = length;
4971					return (NULL);
4972				}
4973			}
4974			/* He's alive so give him credit */
4975			if ((stcb) && netp && *netp) {
4976				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4977					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4978					    stcb->asoc.overall_error_count,
4979					    0,
4980					    SCTP_FROM_SCTP_INPUT,
4981					    __LINE__);
4982				}
4983				stcb->asoc.overall_error_count = 0;
4984				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4985			}
4986			break;
4987		case SCTP_ECN_ECHO:
4988			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4989			/* He's alive so give him credit */
4990			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4991				/* Its not ours */
4992				if (locked_tcb) {
4993					SCTP_TCB_UNLOCK(locked_tcb);
4994				}
4995				*offset = length;
4996				return (NULL);
4997			}
4998			if (stcb) {
4999				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5000					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5001					    stcb->asoc.overall_error_count,
5002					    0,
5003					    SCTP_FROM_SCTP_INPUT,
5004					    __LINE__);
5005				}
5006				stcb->asoc.overall_error_count = 0;
5007				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5008				    stcb);
5009			}
5010			break;
5011		case SCTP_ECN_CWR:
5012			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5013			/* He's alive so give him credit */
5014			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5015				/* Its not ours */
5016				if (locked_tcb) {
5017					SCTP_TCB_UNLOCK(locked_tcb);
5018				}
5019				*offset = length;
5020				return (NULL);
5021			}
5022			if (stcb) {
5023				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5024					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5025					    stcb->asoc.overall_error_count,
5026					    0,
5027					    SCTP_FROM_SCTP_INPUT,
5028					    __LINE__);
5029				}
5030				stcb->asoc.overall_error_count = 0;
5031				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
5032			}
5033			break;
5034		case SCTP_SHUTDOWN_COMPLETE:
5035			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
5036			/* must be first and only chunk */
5037			if ((num_chunks > 1) ||
5038			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5039				*offset = length;
5040				if (locked_tcb) {
5041					SCTP_TCB_UNLOCK(locked_tcb);
5042				}
5043				return (NULL);
5044			}
5045			if ((stcb) && netp && *netp) {
5046				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5047				    stcb, *netp);
5048			}
5049			*offset = length;
5050			return (NULL);
5051			break;
5052		case SCTP_ASCONF:
5053			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5054			/* He's alive so give him credit */
5055			if (stcb) {
5056				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5057					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5058					    stcb->asoc.overall_error_count,
5059					    0,
5060					    SCTP_FROM_SCTP_INPUT,
5061					    __LINE__);
5062				}
5063				stcb->asoc.overall_error_count = 0;
5064				sctp_handle_asconf(m, *offset,
5065				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5066				asconf_cnt++;
5067			}
5068			break;
5069		case SCTP_ASCONF_ACK:
5070			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5071			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5072				/* Its not ours */
5073				if (locked_tcb) {
5074					SCTP_TCB_UNLOCK(locked_tcb);
5075				}
5076				*offset = length;
5077				return (NULL);
5078			}
5079			if ((stcb) && netp && *netp) {
5080				/* He's alive so give him credit */
5081				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5082					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5083					    stcb->asoc.overall_error_count,
5084					    0,
5085					    SCTP_FROM_SCTP_INPUT,
5086					    __LINE__);
5087				}
5088				stcb->asoc.overall_error_count = 0;
5089				sctp_handle_asconf_ack(m, *offset,
5090				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5091				if (abort_no_unlock)
5092					return (NULL);
5093			}
5094			break;
5095		case SCTP_FORWARD_CUM_TSN:
5096			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5097			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5098				/* Its not ours */
5099				if (locked_tcb) {
5100					SCTP_TCB_UNLOCK(locked_tcb);
5101				}
5102				*offset = length;
5103				return (NULL);
5104			}
5105			/* He's alive so give him credit */
5106			if (stcb) {
5107				int abort_flag = 0;
5108
5109				stcb->asoc.overall_error_count = 0;
5110				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5111					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5112					    stcb->asoc.overall_error_count,
5113					    0,
5114					    SCTP_FROM_SCTP_INPUT,
5115					    __LINE__);
5116				}
5117				*fwd_tsn_seen = 1;
5118				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5119					/* We are not interested anymore */
5120#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5121					so = SCTP_INP_SO(inp);
5122					atomic_add_int(&stcb->asoc.refcnt, 1);
5123					SCTP_TCB_UNLOCK(stcb);
5124					SCTP_SOCKET_LOCK(so, 1);
5125					SCTP_TCB_LOCK(stcb);
5126					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5127#endif
5128					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5129#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5130					SCTP_SOCKET_UNLOCK(so, 1);
5131#endif
5132					*offset = length;
5133					return (NULL);
5134				}
5135				sctp_handle_forward_tsn(stcb,
5136				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5137				if (abort_flag) {
5138					*offset = length;
5139					return (NULL);
5140				} else {
5141					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5142						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5143						    stcb->asoc.overall_error_count,
5144						    0,
5145						    SCTP_FROM_SCTP_INPUT,
5146						    __LINE__);
5147					}
5148					stcb->asoc.overall_error_count = 0;
5149				}
5150
5151			}
5152			break;
5153		case SCTP_STREAM_RESET:
5154			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5155			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5156				/* Its not ours */
5157				if (locked_tcb) {
5158					SCTP_TCB_UNLOCK(locked_tcb);
5159				}
5160				*offset = length;
5161				return (NULL);
5162			}
5163			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5164				/* We are not interested anymore */
5165#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5166				so = SCTP_INP_SO(inp);
5167				atomic_add_int(&stcb->asoc.refcnt, 1);
5168				SCTP_TCB_UNLOCK(stcb);
5169				SCTP_SOCKET_LOCK(so, 1);
5170				SCTP_TCB_LOCK(stcb);
5171				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5172#endif
5173				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5174#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5175				SCTP_SOCKET_UNLOCK(so, 1);
5176#endif
5177				*offset = length;
5178				return (NULL);
5179			}
5180			if (stcb->asoc.peer_supports_strreset == 0) {
5181				/*
5182				 * hmm, peer should have announced this, but
5183				 * we will turn it on since he is sending us
5184				 * a stream reset.
5185				 */
5186				stcb->asoc.peer_supports_strreset = 1;
5187			}
5188			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
5189				/* stop processing */
5190				*offset = length;
5191				return (NULL);
5192			}
5193			break;
5194		case SCTP_PACKET_DROPPED:
5195			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5196			/* re-get it all please */
5197			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5198				/* Its not ours */
5199				if (locked_tcb) {
5200					SCTP_TCB_UNLOCK(locked_tcb);
5201				}
5202				*offset = length;
5203				return (NULL);
5204			}
5205			if (ch && (stcb) && netp && (*netp)) {
5206				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5207				    stcb, *netp,
5208				    min(chk_length, (sizeof(chunk_buf) - 4)));
5209
5210			}
5211			break;
5212
5213		case SCTP_AUTHENTICATION:
5214			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5215			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5216				goto unknown_chunk;
5217
5218			if (stcb == NULL) {
5219				/* save the first AUTH for later processing */
5220				if (auth_skipped == 0) {
5221					auth_offset = *offset;
5222					auth_len = chk_length;
5223					auth_skipped = 1;
5224				}
5225				/* skip this chunk (temporarily) */
5226				goto next_chunk;
5227			}
5228			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5229			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5230			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5231				/* Its not ours */
5232				if (locked_tcb) {
5233					SCTP_TCB_UNLOCK(locked_tcb);
5234				}
5235				*offset = length;
5236				return (NULL);
5237			}
5238			if (got_auth == 1) {
5239				/* skip this chunk... it's already auth'd */
5240				goto next_chunk;
5241			}
5242			got_auth = 1;
5243			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5244			    m, *offset)) {
5245				/* auth HMAC failed so dump the packet */
5246				*offset = length;
5247				return (stcb);
5248			} else {
5249				/* remaining chunks are HMAC checked */
5250				stcb->asoc.authenticated = 1;
5251			}
5252			break;
5253
5254		default:
5255	unknown_chunk:
5256			/* it's an unknown chunk! */
5257			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5258				struct mbuf *mm;
5259				struct sctp_paramhdr *phd;
5260
5261				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5262				    0, M_DONTWAIT, 1, MT_DATA);
5263				if (mm) {
5264					phd = mtod(mm, struct sctp_paramhdr *);
5265					/*
5266					 * We cheat and use param type since
5267					 * we did not bother to define a
5268					 * error cause struct. They are the
5269					 * same basic format with different
5270					 * names.
5271					 */
5272					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5273					phd->param_length = htons(chk_length + sizeof(*phd));
5274					SCTP_BUF_LEN(mm) = sizeof(*phd);
5275					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
5276					    M_DONTWAIT);
5277					if (SCTP_BUF_NEXT(mm)) {
5278#ifdef SCTP_MBUF_LOGGING
5279						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5280							struct mbuf *mat;
5281
5282							mat = SCTP_BUF_NEXT(mm);
5283							while (mat) {
5284								if (SCTP_BUF_IS_EXTENDED(mat)) {
5285									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5286								}
5287								mat = SCTP_BUF_NEXT(mat);
5288							}
5289						}
5290#endif
5291						sctp_queue_op_err(stcb, mm);
5292					} else {
5293						sctp_m_freem(mm);
5294					}
5295				}
5296			}
5297			if ((ch->chunk_type & 0x80) == 0) {
5298				/* discard this packet */
5299				*offset = length;
5300				return (stcb);
5301			}	/* else skip this bad chunk and continue... */
5302			break;
5303		}		/* switch (ch->chunk_type) */
5304
5305
5306next_chunk:
5307		/* get the next chunk */
5308		*offset += SCTP_SIZE32(chk_length);
5309		if (*offset >= length) {
5310			/* no more data left in the mbuf chain */
5311			break;
5312		}
5313		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5314		    sizeof(struct sctp_chunkhdr), chunk_buf);
5315		if (ch == NULL) {
5316			if (locked_tcb) {
5317				SCTP_TCB_UNLOCK(locked_tcb);
5318			}
5319			*offset = length;
5320			return (NULL);
5321		}
5322	}			/* while */
5323
5324	if (asconf_cnt > 0 && stcb != NULL) {
5325		sctp_send_asconf_ack(stcb);
5326	}
5327	return (stcb);
5328}
5329
5330
5331/*
5332 * Process the ECN bits we have something set so we must look to see if it is
5333 * ECN(0) or ECN(1) or CE
5334 */
5335static void
5336sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
5337    uint8_t ecn_bits)
5338{
5339	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5340		;
5341	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
5342		/*
5343		 * we only add to the nonce sum for ECT1, ECT0 does not
5344		 * change the NS bit (that we have yet to find a way to send
5345		 * it yet).
5346		 */
5347
5348		/* ECN Nonce stuff */
5349		stcb->asoc.receiver_nonce_sum++;
5350		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
5351
5352		/*
5353		 * Drag up the last_echo point if cumack is larger since we
5354		 * don't want the point falling way behind by more than
5355		 * 2^^31 and then having it be incorrect.
5356		 */
5357		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5358		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5359			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5360		}
5361	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
5362		/*
5363		 * Drag up the last_echo point if cumack is larger since we
5364		 * don't want the point falling way behind by more than
5365		 * 2^^31 and then having it be incorrect.
5366		 */
5367		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
5368		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
5369			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
5370		}
5371	}
5372}
5373
5374static void
5375sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
5376    uint32_t high_tsn, uint8_t ecn_bits)
5377{
5378	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
5379		/*
5380		 * we possibly must notify the sender that a congestion
5381		 * window reduction is in order. We do this by adding a ECNE
5382		 * chunk to the output chunk queue. The incoming CWR will
5383		 * remove this chunk.
5384		 */
5385		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
5386		    MAX_TSN)) {
5387			/* Yep, we need to add a ECNE */
5388			sctp_send_ecn_echo(stcb, net, high_tsn);
5389			stcb->asoc.last_echo_tsn = high_tsn;
5390		}
5391	}
5392}
5393
5394#ifdef INVARIANTS
5395static void
5396sctp_validate_no_locks(struct sctp_inpcb *inp)
5397{
5398	struct sctp_tcb *stcb;
5399
5400	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
5401		if (mtx_owned(&stcb->tcb_mtx)) {
5402			panic("Own lock on stcb at return from input");
5403		}
5404	}
5405}
5406
5407#endif
5408
5409/*
5410 * common input chunk processing (v4 and v6)
5411 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;	/* highest TSN seen by sctp_process_data() */
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;

	/*
	 * NOTE(review): if stcb is non-NULL it appears to be locked on
	 * entry; every path below either unlocks it (SCTP_TCB_UNLOCK)
	 * or the association has been destroyed by the callee - confirm
	 * against callers.
	 */
	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	    m, iphlen, offset, length, stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
			/* Non-zero port means UDP encapsulation is in use. */
			if ((net) && (port)) {
				if (net->port == 0) {
					/* First time: shrink path MTU by the UDP header size. */
					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
				}
				net->port = port;
			}
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out_now;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only.  Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		/* Still try to send queued chunks; stcb remains locked. */
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			/* Drop the data silently in any other state. */
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* take care of ECN, part 1. */
		if (stcb->asoc.ecn_allowed &&
		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			goto out_now;
		}
		data_processed = 1;
		if (retval == 0) {
			/* take care of ecn part 2. */
			if (stcb->asoc.ecn_allowed &&
			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
				sctp_process_ecn_marked_b(stcb, net, high_tsn,
				    ecn_bits);
			}
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	/* A FORWARD-TSN without data still requires a SACK (and gap check). */
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		int was_a_gap = 0;

		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
			/* there was a gap before this data was processed */
			was_a_gap = 1;
		}
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			goto out_now;
		}
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
trigger_send:
	/* stcb is non-NULL and locked on every path that reaches here. */
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	/*
	 * Send if there are control chunks queued, or unsent data and
	 * either window to use or nothing in flight (zero-window probe).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
out_now:
#ifdef INVARIANTS
	sctp_validate_no_locks(inp);
#endif
	return;
}
5661
#if 0
/*
 * Debug helper (compiled out): dump the length of every mbuf in a
 * chain, plus the external-storage size for mbufs with external
 * (cluster) storage attached.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	for (; m; m = SCTP_BUF_NEXT(m)) {
		/*
		 * m_len is an int, so %d is the matching specifier (the
		 * old %ld was undefined behavior); %p requires a void *.
		 */
		printf("%p: m_len = %d\n", (void *)m, SCTP_BUF_LEN(m));
		if (SCTP_BUF_IS_EXTENDED(m))
			printf("%p: extend_size = %d\n", (void *)m, SCTP_BUF_EXTEND_SIZE(m));
	}
}

#endif
5674
/*
 * IPv4 SCTP input entry point. Validates the packet (headers, checksum,
 * no broadcast/multicast, non-zero destination port), locates the
 * endpoint/association, and hands off to sctp_common_input_processing().
 * 'port' is the UDP source port when the packet arrived UDP-encapsulated,
 * 0 otherwise.
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;		/* IP header length, from 'off' */
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;	/* TOS byte, used for ECN processing */
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;

	uint32_t check, calc_check;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	int refcount_up = 0;	/* set when inp's ref-count must be dropped on exit */
	int length, mlen, offset;

	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef  SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		/* Headers are split across mbufs; pull them contiguous. */
		if ((m = m_pullup(m, offset)) == 0) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	/* validate mbuf chain length with IP payload length */
	if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    m->m_pkthdr.csum_flags);
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		/* Hardware already verified the CRC; skip software check. */
		SCTP_STAT_INCR(sctps_recvhwcrc);
		goto sctp_skip_csum_4;
	}
	check = sh->checksum;	/* save incoming checksum */
	/* Optionally accept zero checksums on loopback/self-addressed packets. */
	if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) &&
	    ((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
	    (SCTP_IS_IT_LOOPBACK(m)))
	    ) {
		SCTP_STAT_INCR(sctps_recvnocrc);
		goto sctp_skip_csum_4;
	}
	sh->checksum = 0;	/* prepare for calc */
	calc_check = sctp_calculate_cksum(m, iphlen);
	SCTP_STAT_INCR(sctps_recvswcrc);
	if (calc_check != check) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
		    calc_check, check, m, mlen, iphlen);

		/*
		 * Bad checksum: still look up the association so we can
		 * report the drop to the peer (packet-dropped chunk).
		 */
		stcb = sctp_findassociation_addr(m, iphlen,
		    offset - sizeof(*ch),
		    sh, ch, &inp, &net,
		    vrf_id);
		if ((net) && (port)) {
			/* UDP encapsulation: record the port, adjust MTU once. */
			if (net->port == 0) {
				sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
			}
			net->port = port;
		}
		if ((inp) && (stcb)) {
			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
		} else if ((inp != NULL) && (stcb == NULL)) {
			/* lookup bumped inp's ref-count; drop it at 'bad' */
			refcount_up = 1;
		}
		SCTP_STAT_INCR(sctps_badsum);
		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
		goto bad;
	}
	sh->checksum = calc_check;
sctp_skip_csum_4:
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	if ((net) && (port)) {
		/* UDP encapsulation: record the port, adjust MTU once. */
		if (net->port == 0) {
			sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
		}
		net->port = port;
	}
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		/* No listening endpoint: out-of-the-blue packet. */
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			/* Never respond to a SHUTDOWN-COMPLETE (avoids loops). */
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
		goto bad;
	} else if (stcb == NULL) {
		/* endpoint found but no association; drop the ref on exit */
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		MODULE_GLOBAL(MOD_IPSEC, ipsec4stat).in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	/*
	 * NOTE(review): assumes ip_len has been adjusted by ip_input() so
	 * adding iphlen back yields the total datagram length - confirm
	 * against the IP input path for this kernel version.
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id, port);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
bad:
	/* Error exit: release the lock, the reference, and the mbuf chain. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
/*
 * Legacy IPv4 input entry point: plain SCTP with no UDP encapsulation
 * (port 0). Converted from an old-style (K&R) definition to an ANSI
 * prototype definition; the calling interface is unchanged.
 */
void
sctp_input(struct mbuf *i_pak, int off)
{
	sctp_input_with_port(i_pak, off, 0);
}
5922