sctp_input.c revision 218232
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2011, by Randall Stewart, rrs@lakerest.net and
4 *                          Michael Tuexen, tuexen@fh-muenster.de
5 *                          All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * a) Redistributions of source code must retain the above copyright notice,
11 *   this list of conditions and the following disclaimer.
12 *
13 * b) Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *   the documentation and/or other materials provided with the distribution.
16 *
17 * c) Neither the name of Cisco Systems, Inc. nor the names of its
18 *    contributors may be used to endorse or promote products derived
19 *    from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 218232 2011-02-03 19:22:21Z rrs $");
38
39#include <netinet/sctp_os.h>
40#include <netinet/sctp_var.h>
41#include <netinet/sctp_sysctl.h>
42#include <netinet/sctp_pcb.h>
43#include <netinet/sctp_header.h>
44#include <netinet/sctputil.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_input.h>
47#include <netinet/sctp_auth.h>
48#include <netinet/sctp_indata.h>
49#include <netinet/sctp_asconf.h>
50#include <netinet/sctp_bsd_addr.h>
51#include <netinet/sctp_timer.h>
52#include <netinet/sctp_crc32.h>
53#include <netinet/udp.h>
54#include <sys/smp.h>
55
56
57
58static void
59sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
60{
61	struct sctp_nets *net;
62
63	/*
64	 * This now not only stops all cookie timers it also stops any INIT
65	 * timers as well. This will make sure that the timers are stopped
66	 * in all collision cases.
67	 */
68	SCTP_TCB_LOCK_ASSERT(stcb);
69	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
70		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
71			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
72			    stcb->sctp_ep,
73			    stcb,
74			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
75		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
76			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
77			    stcb->sctp_ep,
78			    stcb,
79			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
80		}
81	}
82}
83
84/* INIT handler */
85static void
86sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
87    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
88    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
89{
90	struct sctp_init *init;
91	struct mbuf *op_err;
92	uint32_t init_limit;
93
94	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
95	    stcb);
96	if (stcb == NULL) {
97		SCTP_INP_RLOCK(inp);
98		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
99			goto outnow;
100		}
101	}
102	op_err = NULL;
103	init = &cp->init;
104	/* First are we accepting? */
105	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
106		SCTPDBG(SCTP_DEBUG_INPUT2,
107		    "sctp_handle_init: Abort, so_qlimit:%d\n",
108		    inp->sctp_socket->so_qlimit);
109		/*
110		 * FIX ME ?? What about TCP model and we have a
111		 * match/restart case? Actually no fix is needed. the lookup
112		 * will always find the existing assoc so stcb would not be
113		 * NULL. It may be questionable to do this since we COULD
114		 * just send back the INIT-ACK and hope that the app did
115		 * accept()'s by the time the COOKIE was sent. But there is
116		 * a price to pay for COOKIE generation and I don't want to
117		 * pay it on the chance that the app will actually do some
118		 * accepts(). The App just looses and should NOT be in this
119		 * state :-)
120		 */
121		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
122		    vrf_id, port);
123		if (stcb)
124			*abort_no_unlock = 1;
125		goto outnow;
126	}
127	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
128		/* Invalid length */
129		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
130		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
131		    vrf_id, port);
132		if (stcb)
133			*abort_no_unlock = 1;
134		goto outnow;
135	}
136	/* validate parameters */
137	if (init->initiate_tag == 0) {
138		/* protocol error... send abort */
139		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
140		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
141		    vrf_id, port);
142		if (stcb)
143			*abort_no_unlock = 1;
144		goto outnow;
145	}
146	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
147		/* invalid parameter... send abort */
148		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
149		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
150		    vrf_id, port);
151		if (stcb)
152			*abort_no_unlock = 1;
153		goto outnow;
154	}
155	if (init->num_inbound_streams == 0) {
156		/* protocol error... send abort */
157		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
158		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
159		    vrf_id, port);
160		if (stcb)
161			*abort_no_unlock = 1;
162		goto outnow;
163	}
164	if (init->num_outbound_streams == 0) {
165		/* protocol error... send abort */
166		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
167		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
168		    vrf_id, port);
169		if (stcb)
170			*abort_no_unlock = 1;
171		goto outnow;
172	}
173	init_limit = offset + ntohs(cp->ch.chunk_length);
174	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
175	    init_limit)) {
176		/* auth parameter(s) error... send abort */
177		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
178		if (stcb)
179			*abort_no_unlock = 1;
180		goto outnow;
181	}
182	/* send an INIT-ACK w/cookie */
183	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
184	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
185	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
186outnow:
187	if (stcb == NULL) {
188		SCTP_INP_RUNLOCK(inp);
189	}
190}
191
192/*
193 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
194 */
195
196int
197sctp_is_there_unsent_data(struct sctp_tcb *stcb)
198{
199	int unsent_data = 0;
200	unsigned int i;
201	struct sctp_stream_queue_pending *sp;
202	struct sctp_association *asoc;
203
204	/*
205	 * This function returns the number of streams that have true unsent
206	 * data on them. Note that as it looks through it will clean up any
207	 * places that have old data that has been sent but left at top of
208	 * stream queue.
209	 */
210	asoc = &stcb->asoc;
211	SCTP_TCB_SEND_LOCK(stcb);
212	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
213		/* Check to see if some data queued */
214		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
215			/* sa_ignore FREED_MEMORY */
216			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
217			if (sp == NULL) {
218				continue;
219			}
220			if ((sp->msg_is_complete) &&
221			    (sp->length == 0) &&
222			    (sp->sender_all_done)) {
223				/*
224				 * We are doing differed cleanup. Last time
225				 * through when we took all the data the
226				 * sender_all_done was not set.
227				 */
228				if (sp->put_last_out == 0) {
229					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
230					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
231					    sp->sender_all_done,
232					    sp->length,
233					    sp->msg_is_complete,
234					    sp->put_last_out);
235				}
236				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
237				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
238				if (sp->net) {
239					sctp_free_remote_addr(sp->net);
240					sp->net = NULL;
241				}
242				if (sp->data) {
243					sctp_m_freem(sp->data);
244					sp->data = NULL;
245				}
246				sctp_free_a_strmoq(stcb, sp);
247			} else {
248				unsent_data++;
249				break;
250			}
251		}
252	}
253	SCTP_TCB_SEND_UNLOCK(stcb);
254	return (unsent_data);
255}
256
/*
 * Process the parameters of a peer's INIT or INIT-ACK chunk into the
 * association: vtag, peer rwnd, initial TSN bookkeeping, outbound
 * stream trimming and inbound stream (re)allocation.  Returns 0 on
 * success, -1 if memory for the inbound stream array could not be
 * allocated (caller is expected to tear down on failure).
 * NOTE(review): the 'net' parameter is unused in this function.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's: one before the peer's initial TSN, i.e. nothing received yet */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* Drop already-chunked data queued on abandoned streams. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* tell the ULP the datagram was never sent */
					sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
					    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk);
				/* sa_ignore FREED_MEMORY */
			}
		}
		/* Also purge per-stream pending messages on those streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;

	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones (restart case): drain each inbound queue */
		struct sctp_queued_to_read *ctl, *nctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Our inbound stream count is what the peer will send on. */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		/* 0xffff: nothing delivered yet (next expected SSN is 0) */
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
402
/*
 * INIT-ACK message processing/consumption returns value < 0 on error.
 * Validates parameters, loads peer state/addresses, cancels the INIT
 * timer, computes an initial RTO and queues the COOKIE-ECHO.  On fatal
 * errors the association is aborted and *abort_no_unlock is set so the
 * caller skips unlocking the (now freed) TCB.
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	/* Unrecognized-but-reportable params come back as an ERROR chain. */
	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* A valid INIT-ACK resets the error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
	    SCTP_DETERMINE_LL_NOTOK);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}
528
/*
 * Handle an incoming HEARTBEAT-ACK: locate the net the echoed address
 * belongs to, confirm it if the echoed random values match, clear its
 * error state, recompute its RTO from the echoed timestamp, and finish
 * any pending mobility primary-switch work.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;

	memset(&store, 0, sizeof(store));
	/* Rebuild the sockaddr we originally sent the heartbeat to. */
	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
		sin->sin_family = cp->heartbeat.hb_info.addr_family;
		sin->sin_len = cp->heartbeat.hb_info.addr_len;
		sin->sin_port = stcb->rport;
		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin->sin_addr));
	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
		sin6->sin6_port = stcb->rport;
		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin6->sin6_addr));
	} else {
		/* Unknown family/length combination: silently discard. */
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	/* r_net is non-NULL here; the repeated test below is redundant. */
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If the its a HB and it's random value is correct when can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficent if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	/*
	 * The echoed time values are the ones we put in the HB ourselves;
	 * presumably already in host order, hence no conversion here --
	 * TODO confirm against the HB send path.
	 */
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
		/* now was it the primary? if so restore */
		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
		}
	}
	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
	 * set the destination to active state and set the cwnd to one or
	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
	 * timer is running, for the destination, stop the timer because a
	 * PF-heartbeat was received.
	 *
	 * NOTE(review): this section operates on 'net' (the net the chunk
	 * arrived on) rather than 'r_net' (the net the HB was sent to) --
	 * confirm this asymmetry is intentional.
	 */
	if ((stcb->asoc.sctp_cmt_on_off > 0) &&
	    (stcb->asoc.sctp_cmt_pf > 0) &&
	    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		}
		net->dest_state &= ~SCTP_ADDR_PF;
		net->cwnd = net->mtu * stcb->asoc.sctp_cmt_pf;
		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
		    net, net->cwnd);
	}
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
	    SCTP_DETERMINE_LL_OK);
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			/* A deleted-primary handoff is pending; complete it now. */
			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				    stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}
660
661static int
662sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
663{
664	/*
665	 * return 0 means we want you to proceed with the abort non-zero
666	 * means no abort processing
667	 */
668	struct sctpasochead *head;
669
670	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
671		/* generate a new vtag and send init */
672		LIST_REMOVE(stcb, sctp_asocs);
673		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
674		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
675		/*
676		 * put it in the bucket in the vtag hash of assoc's for the
677		 * system
678		 */
679		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
680		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
681		return (1);
682	}
683	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
684		/*
685		 * treat like a case where the cookie expired i.e.: - dump
686		 * current cookie. - generate a new vtag. - resend init.
687		 */
688		/* generate a new vtag and send init */
689		LIST_REMOVE(stcb, sctp_asocs);
690		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
691		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
692		sctp_stop_all_cookie_timers(stcb);
693		sctp_toss_old_cookies(stcb, &stcb->asoc);
694		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
695		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
696		/*
697		 * put it in the bucket in the vtag hash of assoc's for the
698		 * system
699		 */
700		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
701		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
702		return (1);
703	}
704	return (0);
705}
706
707static int
708sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
709    struct sctp_nets *net)
710{
711	/*
712	 * return 0 means we want you to proceed with the abort non-zero
713	 * means no abort processing
714	 */
715	if (stcb->asoc.peer_supports_auth == 0) {
716		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
717		return (0);
718	}
719	sctp_asconf_send_nat_state_update(stcb, net);
720	return (1);
721}
722
723
/*
 * Handle an incoming ABORT chunk.  First check for the two special NAT
 * error causes, which may rescue the association instead of killing it;
 * otherwise stop timers, notify the user and free the TCB.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(cp->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_abort_chunk *cpnext;
		struct sctp_missing_nat_state *natc;
		uint16_t cause;

		/* The first error cause immediately follows the chunk header. */
		cpnext = cp;
		cpnext++;
		natc = (struct sctp_missing_nat_state *)cpnext;
		cause = ntohs(natc->cause);
		if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    cp->ch.chunk_flags);
			/* Non-zero return: handled by re-INIT, do not tear down. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return;
			}
		} else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    cp->ch.chunk_flags);
			/* Non-zero return: handled via ASCONF update, keep assoc. */
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
#if defined(SCTP_PANIC_ON_ABORT)
	printf("stcb:%p state:%d rport:%d net:%p\n",
	    stcb, stcb->asoc.state, stcb->rport, net);
	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		panic("Received an ABORT");
	} else {
		printf("No panic its in state %x closed\n", stcb->asoc.state);
	}
#endif
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Drop the TCB lock and take the socket lock first (holding a
	 * refcount so the TCB stays alive), then re-take the TCB lock --
	 * apparently to honor socket-before-TCB lock ordering on these
	 * platforms.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/*
	 * NOTE(review): SCTP_LOC_6 is also used for the timer stop above;
	 * a distinct location code here would make debugging logs less
	 * ambiguous -- confirm whether that is intentional.
	 */
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
804
/*
 * Handle an incoming SHUTDOWN chunk: process its cumulative TSN ack,
 * terminate any partial-delivery in progress, move to
 * SHUTDOWN-RECEIVED, and -- once all outstanding data is acked/sent --
 * reply with a SHUTDOWN-ACK and move to SHUTDOWN-ACK-SENT.  Sets
 * *abort_flag if sctp_update_acked() decided to abort.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* SHUTDOWN is ignored before the association is established. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		/* Process the cumulative TSN ack carried by the SHUTDOWN. */
		sctp_update_acked(stcb, cp, net, abort_flag);
		if (*abort_flag) {
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Re-acquire locks in socket-then-TCB order, with a refcount held. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* Wake any reader blocked on the now-terminated partial delivery. */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}
910
911static void
912sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
913    struct sctp_tcb *stcb,
914    struct sctp_nets *net)
915{
916	struct sctp_association *asoc;
917
918#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
919	struct socket *so;
920
921	so = SCTP_INP_SO(stcb->sctp_ep);
922#endif
923	SCTPDBG(SCTP_DEBUG_INPUT2,
924	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
925	if (stcb == NULL)
926		return;
927
928	asoc = &stcb->asoc;
929	/* process according to association state */
930	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
931	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
932		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
933		sctp_send_shutdown_complete(stcb, net, 1);
934		SCTP_TCB_UNLOCK(stcb);
935		return;
936	}
937	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
938	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
939		/* unexpected SHUTDOWN-ACK... so ignore... */
940		SCTP_TCB_UNLOCK(stcb);
941		return;
942	}
943	if (asoc->control_pdapi) {
944		/*
945		 * With a normal shutdown we assume the end of last record.
946		 */
947		SCTP_INP_READ_LOCK(stcb->sctp_ep);
948		asoc->control_pdapi->end_added = 1;
949		asoc->control_pdapi->pdapi_aborted = 1;
950		asoc->control_pdapi = NULL;
951		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
952#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
953		atomic_add_int(&stcb->asoc.refcnt, 1);
954		SCTP_TCB_UNLOCK(stcb);
955		SCTP_SOCKET_LOCK(so, 1);
956		SCTP_TCB_LOCK(stcb);
957		atomic_subtract_int(&stcb->asoc.refcnt, 1);
958		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
959			/* assoc was freed while we were unlocked */
960			SCTP_SOCKET_UNLOCK(so, 1);
961			return;
962		}
963#endif
964		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
965#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
966		SCTP_SOCKET_UNLOCK(so, 1);
967#endif
968	}
969	/* are the queues empty? */
970	if (!TAILQ_EMPTY(&asoc->send_queue) ||
971	    !TAILQ_EMPTY(&asoc->sent_queue) ||
972	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
973		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
974	}
975	/* stop the timer */
976	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
977	/* send SHUTDOWN-COMPLETE */
978	sctp_send_shutdown_complete(stcb, net, 0);
979	/* notify upper layer protocol */
980	if (stcb->sctp_socket) {
981		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
982		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
983		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
984			/* Set the connected flag to disconnected */
985			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
986		}
987	}
988	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
989	/* free the TCB but first save off the ep */
990#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
991	atomic_add_int(&stcb->asoc.refcnt, 1);
992	SCTP_TCB_UNLOCK(stcb);
993	SCTP_SOCKET_LOCK(so, 1);
994	SCTP_TCB_LOCK(stcb);
995	atomic_subtract_int(&stcb->asoc.refcnt, 1);
996#endif
997	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
998	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
999#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1000	SCTP_SOCKET_UNLOCK(so, 1);
1001#endif
1002}
1003
1004/*
1005 * Skip past the param header and then we will find the chunk that caused the
1006 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
1007 * our peer must be broken.
1008 */
1009static void
1010sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1011    struct sctp_nets *net)
1012{
1013	struct sctp_chunkhdr *chk;
1014
1015	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1016	switch (chk->chunk_type) {
1017	case SCTP_ASCONF_ACK:
1018	case SCTP_ASCONF:
1019		sctp_asconf_cleanup(stcb, net);
1020		break;
1021	case SCTP_FORWARD_CUM_TSN:
1022		stcb->asoc.peer_supports_prsctp = 0;
1023		break;
1024	default:
1025		SCTPDBG(SCTP_DEBUG_INPUT2,
1026		    "Peer does not support chunk type %d(%x)??\n",
1027		    chk->chunk_type, (uint32_t) chk->chunk_type);
1028		break;
1029	}
1030}
1031
1032/*
1033 * Skip past the param header and then we will find the param that caused the
1034 * problem.  There are a number of param's in a ASCONF OR the prsctp param
1035 * these will turn of specific features.
1036 */
1037static void
1038sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1039{
1040	struct sctp_paramhdr *pbad;
1041
1042	pbad = phdr + 1;
1043	switch (ntohs(pbad->param_type)) {
1044		/* pr-sctp draft */
1045	case SCTP_PRSCTP_SUPPORTED:
1046		stcb->asoc.peer_supports_prsctp = 0;
1047		break;
1048	case SCTP_SUPPORTED_CHUNK_EXT:
1049		break;
1050		/* draft-ietf-tsvwg-addip-sctp */
1051	case SCTP_HAS_NAT_SUPPORT:
1052		stcb->asoc.peer_supports_nat = 0;
1053		break;
1054	case SCTP_ADD_IP_ADDRESS:
1055	case SCTP_DEL_IP_ADDRESS:
1056	case SCTP_SET_PRIM_ADDR:
1057		stcb->asoc.peer_supports_asconf = 0;
1058		break;
1059	case SCTP_SUCCESS_REPORT:
1060	case SCTP_ERROR_CAUSE_IND:
1061		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1062		SCTPDBG(SCTP_DEBUG_INPUT2,
1063		    "Turning off ASCONF to this strange peer\n");
1064		stcb->asoc.peer_supports_asconf = 0;
1065		break;
1066	default:
1067		SCTPDBG(SCTP_DEBUG_INPUT2,
1068		    "Peer does not support param type %d(%x)??\n",
1069		    pbad->param_type, (uint32_t) pbad->param_type);
1070		break;
1071	}
1072}
1073
1074static int
1075sctp_handle_error(struct sctp_chunkhdr *ch,
1076    struct sctp_tcb *stcb, struct sctp_nets *net)
1077{
1078	int chklen;
1079	struct sctp_paramhdr *phdr;
1080	uint16_t error_type;
1081	uint16_t error_len;
1082	struct sctp_association *asoc;
1083	int adjust;
1084
1085#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1086	struct socket *so;
1087
1088#endif
1089
1090	/* parse through all of the errors and process */
1091	asoc = &stcb->asoc;
1092	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
1093	    sizeof(struct sctp_chunkhdr));
1094	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
1095	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
1096		/* Process an Error Cause */
1097		error_type = ntohs(phdr->param_type);
1098		error_len = ntohs(phdr->param_length);
1099		if ((error_len > chklen) || (error_len == 0)) {
1100			/* invalid param length for this param */
1101			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
1102			    chklen, error_len);
1103			return (0);
1104		}
1105		switch (error_type) {
1106		case SCTP_CAUSE_INVALID_STREAM:
1107		case SCTP_CAUSE_MISSING_PARAM:
1108		case SCTP_CAUSE_INVALID_PARAM:
1109		case SCTP_CAUSE_NO_USER_DATA:
1110			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
1111			    error_type);
1112			break;
1113		case SCTP_CAUSE_NAT_COLLIDING_STATE:
1114			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
1115			    ch->chunk_flags);
1116			if (sctp_handle_nat_colliding_state(stcb)) {
1117				return (0);
1118			}
1119			break;
1120		case SCTP_CAUSE_NAT_MISSING_STATE:
1121			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
1122			    ch->chunk_flags);
1123			if (sctp_handle_nat_missing_state(stcb, net)) {
1124				return (0);
1125			}
1126			break;
1127		case SCTP_CAUSE_STALE_COOKIE:
1128			/*
1129			 * We only act if we have echoed a cookie and are
1130			 * waiting.
1131			 */
1132			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
1133				int *p;
1134
1135				p = (int *)((caddr_t)phdr + sizeof(*phdr));
1136				/* Save the time doubled */
1137				asoc->cookie_preserve_req = ntohl(*p) << 1;
1138				asoc->stale_cookie_count++;
1139				if (asoc->stale_cookie_count >
1140				    asoc->max_init_times) {
1141					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
1142					/* now free the asoc */
1143#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1144					so = SCTP_INP_SO(stcb->sctp_ep);
1145					atomic_add_int(&stcb->asoc.refcnt, 1);
1146					SCTP_TCB_UNLOCK(stcb);
1147					SCTP_SOCKET_LOCK(so, 1);
1148					SCTP_TCB_LOCK(stcb);
1149					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1150#endif
1151					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1152					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1153#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1154					SCTP_SOCKET_UNLOCK(so, 1);
1155#endif
1156					return (-1);
1157				}
1158				/* blast back to INIT state */
1159				sctp_toss_old_cookies(stcb, &stcb->asoc);
1160				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1161				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1162				sctp_stop_all_cookie_timers(stcb);
1163				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1164			}
1165			break;
1166		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1167			/*
1168			 * Nothing we can do here, we don't do hostname
1169			 * addresses so if the peer does not like my IPv6
1170			 * (or IPv4 for that matter) it does not matter. If
1171			 * they don't support that type of address, they can
1172			 * NOT possibly get that packet type... i.e. with no
1173			 * IPv6 you can't recieve a IPv6 packet. so we can
1174			 * safely ignore this one. If we ever added support
1175			 * for HOSTNAME Addresses, then we would need to do
1176			 * something here.
1177			 */
1178			break;
1179		case SCTP_CAUSE_UNRECOG_CHUNK:
1180			sctp_process_unrecog_chunk(stcb, phdr, net);
1181			break;
1182		case SCTP_CAUSE_UNRECOG_PARAM:
1183			sctp_process_unrecog_param(stcb, phdr);
1184			break;
1185		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1186			/*
1187			 * We ignore this since the timer will drive out a
1188			 * new cookie anyway and there timer will drive us
1189			 * to send a SHUTDOWN_COMPLETE. We can't send one
1190			 * here since we don't have their tag.
1191			 */
1192			break;
1193		case SCTP_CAUSE_DELETING_LAST_ADDR:
1194		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1195		case SCTP_CAUSE_DELETING_SRC_ADDR:
1196			/*
1197			 * We should NOT get these here, but in a
1198			 * ASCONF-ACK.
1199			 */
1200			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1201			    error_type);
1202			break;
1203		case SCTP_CAUSE_OUT_OF_RESC:
1204			/*
1205			 * And what, pray tell do we do with the fact that
1206			 * the peer is out of resources? Not really sure we
1207			 * could do anything but abort. I suspect this
1208			 * should have came WITH an abort instead of in a
1209			 * OP-ERROR.
1210			 */
1211			break;
1212		default:
1213			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1214			    error_type);
1215			break;
1216		}
1217		adjust = SCTP_SIZE32(error_len);
1218		chklen -= adjust;
1219		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1220	}
1221	return (0);
1222}
1223
1224static int
1225sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1226    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1227    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1228{
1229	struct sctp_init_ack *init_ack;
1230	struct mbuf *op_err;
1231
1232	SCTPDBG(SCTP_DEBUG_INPUT2,
1233	    "sctp_handle_init_ack: handling INIT-ACK\n");
1234
1235	if (stcb == NULL) {
1236		SCTPDBG(SCTP_DEBUG_INPUT2,
1237		    "sctp_handle_init_ack: TCB is null\n");
1238		return (-1);
1239	}
1240	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1241		/* Invalid length */
1242		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1243		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1244		    op_err, 0, net->port);
1245		*abort_no_unlock = 1;
1246		return (-1);
1247	}
1248	init_ack = &cp->init;
1249	/* validate parameters */
1250	if (init_ack->initiate_tag == 0) {
1251		/* protocol error... send an abort */
1252		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1253		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1254		    op_err, 0, net->port);
1255		*abort_no_unlock = 1;
1256		return (-1);
1257	}
1258	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1259		/* protocol error... send an abort */
1260		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1261		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1262		    op_err, 0, net->port);
1263		*abort_no_unlock = 1;
1264		return (-1);
1265	}
1266	if (init_ack->num_inbound_streams == 0) {
1267		/* protocol error... send an abort */
1268		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1269		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1270		    op_err, 0, net->port);
1271		*abort_no_unlock = 1;
1272		return (-1);
1273	}
1274	if (init_ack->num_outbound_streams == 0) {
1275		/* protocol error... send an abort */
1276		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1277		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1278		    op_err, 0, net->port);
1279		*abort_no_unlock = 1;
1280		return (-1);
1281	}
1282	/* process according to association state... */
1283	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1284	case SCTP_STATE_COOKIE_WAIT:
1285		/* this is the expected state for this chunk */
1286		/* process the INIT-ACK parameters */
1287		if (stcb->asoc.primary_destination->dest_state &
1288		    SCTP_ADDR_UNCONFIRMED) {
1289			/*
1290			 * The primary is where we sent the INIT, we can
1291			 * always consider it confirmed when the INIT-ACK is
1292			 * returned. Do this before we load addresses
1293			 * though.
1294			 */
1295			stcb->asoc.primary_destination->dest_state &=
1296			    ~SCTP_ADDR_UNCONFIRMED;
1297			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1298			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1299		}
1300		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1301		    net, abort_no_unlock, vrf_id) < 0) {
1302			/* error in parsing parameters */
1303			return (-1);
1304		}
1305		/* update our state */
1306		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1307		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1308
1309		/* reset the RTO calc */
1310		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1311			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1312			    stcb->asoc.overall_error_count,
1313			    0,
1314			    SCTP_FROM_SCTP_INPUT,
1315			    __LINE__);
1316		}
1317		stcb->asoc.overall_error_count = 0;
1318		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1319		/*
1320		 * collapse the init timer back in case of a exponential
1321		 * backoff
1322		 */
1323		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1324		    stcb, net);
1325		/*
1326		 * the send at the end of the inbound data processing will
1327		 * cause the cookie to be sent
1328		 */
1329		break;
1330	case SCTP_STATE_SHUTDOWN_SENT:
1331		/* incorrect state... discard */
1332		break;
1333	case SCTP_STATE_COOKIE_ECHOED:
1334		/* incorrect state... discard */
1335		break;
1336	case SCTP_STATE_OPEN:
1337		/* incorrect state... discard */
1338		break;
1339	case SCTP_STATE_EMPTY:
1340	case SCTP_STATE_INUSE:
1341	default:
1342		/* incorrect state... discard */
1343		return (-1);
1344		break;
1345	}
1346	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1347	return (0);
1348}
1349
1350static struct sctp_tcb *
1351sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1352    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1353    struct sctp_inpcb *inp, struct sctp_nets **netp,
1354    struct sockaddr *init_src, int *notification,
1355    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1356    uint32_t vrf_id, uint16_t port);
1357
1358
1359/*
1360 * handle a state cookie for an existing association m: input packet mbuf
1361 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1362 * "split" mbuf and the cookie signature does not exist offset: offset into
1363 * mbuf to the cookie-echo chunk
1364 */
1365static struct sctp_tcb *
1366sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1367    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1368    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1369    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1370    uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
1371{
1372	struct sctp_association *asoc;
1373	struct sctp_init_chunk *init_cp, init_buf;
1374	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1375	struct sctp_nets *net;
1376	struct mbuf *op_err;
1377	struct sctp_paramhdr *ph;
1378	int chk_length;
1379	int init_offset, initack_offset, i;
1380	int retval;
1381	int spec_flag = 0;
1382	uint32_t how_indx;
1383
1384	net = *netp;
1385	/* I know that the TCB is non-NULL from the caller */
1386	asoc = &stcb->asoc;
1387	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1388		if (asoc->cookie_how[how_indx] == 0)
1389			break;
1390	}
1391	if (how_indx < sizeof(asoc->cookie_how)) {
1392		asoc->cookie_how[how_indx] = 1;
1393	}
1394	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1395		/* SHUTDOWN came in after sending INIT-ACK */
1396		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1397		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1398		    0, M_DONTWAIT, 1, MT_DATA);
1399		if (op_err == NULL) {
1400			/* FOOBAR */
1401			return (NULL);
1402		}
1403		/* Set the len */
1404		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1405		ph = mtod(op_err, struct sctp_paramhdr *);
1406		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1407		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1408		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1409		    vrf_id, net->port);
1410		if (how_indx < sizeof(asoc->cookie_how))
1411			asoc->cookie_how[how_indx] = 2;
1412		return (NULL);
1413	}
1414	/*
1415	 * find and validate the INIT chunk in the cookie (peer's info) the
1416	 * INIT should start after the cookie-echo header struct (chunk
1417	 * header, state cookie header struct)
1418	 */
1419	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1420
1421	init_cp = (struct sctp_init_chunk *)
1422	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1423	    (uint8_t *) & init_buf);
1424	if (init_cp == NULL) {
1425		/* could not pull a INIT chunk in cookie */
1426		return (NULL);
1427	}
1428	chk_length = ntohs(init_cp->ch.chunk_length);
1429	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1430		return (NULL);
1431	}
1432	/*
1433	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1434	 * INIT-ACK follows the INIT chunk
1435	 */
1436	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1437	initack_cp = (struct sctp_init_ack_chunk *)
1438	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1439	    (uint8_t *) & initack_buf);
1440	if (initack_cp == NULL) {
1441		/* could not pull INIT-ACK chunk in cookie */
1442		return (NULL);
1443	}
1444	chk_length = ntohs(initack_cp->ch.chunk_length);
1445	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1446		return (NULL);
1447	}
1448	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1449	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1450		/*
1451		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1452		 * to get into the OPEN state
1453		 */
1454		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1455			/*-
1456			 * Opps, this means that we somehow generated two vtag's
1457			 * the same. I.e. we did:
1458			 *  Us               Peer
1459			 *   <---INIT(tag=a)------
1460			 *   ----INIT-ACK(tag=t)-->
1461			 *   ----INIT(tag=t)------> *1
1462			 *   <---INIT-ACK(tag=a)---
1463                         *   <----CE(tag=t)------------- *2
1464			 *
1465			 * At point *1 we should be generating a different
1466			 * tag t'. Which means we would throw away the CE and send
1467			 * ours instead. Basically this is case C (throw away side).
1468			 */
1469			if (how_indx < sizeof(asoc->cookie_how))
1470				asoc->cookie_how[how_indx] = 17;
1471			return (NULL);
1472
1473		}
1474		switch SCTP_GET_STATE
1475			(asoc) {
1476		case SCTP_STATE_COOKIE_WAIT:
1477		case SCTP_STATE_COOKIE_ECHOED:
1478			/*
1479			 * INIT was sent but got a COOKIE_ECHO with the
1480			 * correct tags... just accept it...but we must
1481			 * process the init so that we can make sure we have
1482			 * the right seq no's.
1483			 */
1484			/* First we must process the INIT !! */
1485			retval = sctp_process_init(init_cp, stcb, net);
1486			if (retval < 0) {
1487				if (how_indx < sizeof(asoc->cookie_how))
1488					asoc->cookie_how[how_indx] = 3;
1489				return (NULL);
1490			}
1491			/* we have already processed the INIT so no problem */
1492			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1493			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1494			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1495			/* update current state */
1496			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1497				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1498			else
1499				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1500
1501			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1502			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1503				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1504				    stcb->sctp_ep, stcb, asoc->primary_destination);
1505			}
1506			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1507			sctp_stop_all_cookie_timers(stcb);
1508			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1509			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1510			    (inp->sctp_socket->so_qlimit == 0)
1511			    ) {
1512#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1513				struct socket *so;
1514
1515#endif
1516				/*
1517				 * Here is where collision would go if we
1518				 * did a connect() and instead got a
1519				 * init/init-ack/cookie done before the
1520				 * init-ack came back..
1521				 */
1522				stcb->sctp_ep->sctp_flags |=
1523				    SCTP_PCB_FLAGS_CONNECTED;
1524#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1525				so = SCTP_INP_SO(stcb->sctp_ep);
1526				atomic_add_int(&stcb->asoc.refcnt, 1);
1527				SCTP_TCB_UNLOCK(stcb);
1528				SCTP_SOCKET_LOCK(so, 1);
1529				SCTP_TCB_LOCK(stcb);
1530				atomic_add_int(&stcb->asoc.refcnt, -1);
1531				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1532					SCTP_SOCKET_UNLOCK(so, 1);
1533					return (NULL);
1534				}
1535#endif
1536				soisconnected(stcb->sctp_socket);
1537#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1538				SCTP_SOCKET_UNLOCK(so, 1);
1539#endif
1540			}
1541			/* notify upper layer */
1542			*notification = SCTP_NOTIFY_ASSOC_UP;
1543			/*
1544			 * since we did not send a HB make sure we don't
1545			 * double things
1546			 */
1547			net->hb_responded = 1;
1548			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1549			    &cookie->time_entered,
1550			    sctp_align_unsafe_makecopy,
1551			    SCTP_DETERMINE_LL_NOTOK);
1552
1553			if (stcb->asoc.sctp_autoclose_ticks &&
1554			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1555				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1556				    inp, stcb, NULL);
1557			}
1558			break;
1559		default:
1560			/*
1561			 * we're in the OPEN state (or beyond), so peer must
1562			 * have simply lost the COOKIE-ACK
1563			 */
1564			break;
1565			}	/* end switch */
1566		sctp_stop_all_cookie_timers(stcb);
1567		/*
1568		 * We ignore the return code here.. not sure if we should
1569		 * somehow abort.. but we do have an existing asoc. This
1570		 * really should not fail.
1571		 */
1572		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1573		    init_offset + sizeof(struct sctp_init_chunk),
1574		    initack_offset, sh, init_src)) {
1575			if (how_indx < sizeof(asoc->cookie_how))
1576				asoc->cookie_how[how_indx] = 4;
1577			return (NULL);
1578		}
1579		/* respond with a COOKIE-ACK */
1580		sctp_toss_old_cookies(stcb, asoc);
1581		sctp_send_cookie_ack(stcb);
1582		if (how_indx < sizeof(asoc->cookie_how))
1583			asoc->cookie_how[how_indx] = 5;
1584		return (stcb);
1585	}
1586	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1587	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1588	    cookie->tie_tag_my_vtag == 0 &&
1589	    cookie->tie_tag_peer_vtag == 0) {
1590		/*
1591		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1592		 */
1593		if (how_indx < sizeof(asoc->cookie_how))
1594			asoc->cookie_how[how_indx] = 6;
1595		return (NULL);
1596	}
1597	/*
1598	 * If nat support, and the below and stcb is established, send back
1599	 * a ABORT(colliding state) if we are established.
1600	 */
1601	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1602	    (asoc->peer_supports_nat) &&
1603	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1604	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1605	    (asoc->peer_vtag == 0)))) {
1606		/*
1607		 * Special case - Peer's support nat. We may have two init's
1608		 * that we gave out the same tag on since one was not
1609		 * established.. i.e. we get INIT from host-1 behind the nat
1610		 * and we respond tag-a, we get a INIT from host-2 behind
1611		 * the nat and we get tag-a again. Then we bring up host-1
1612		 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1).
1613		 * Now we have colliding state. We must send an abort here
1614		 * with colliding state indication.
1615		 */
1616		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1617		    0, M_DONTWAIT, 1, MT_DATA);
1618		if (op_err == NULL) {
1619			/* FOOBAR */
1620			return (NULL);
1621		}
1622		/* pre-reserve some space */
1623#ifdef INET6
1624		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1625#else
1626		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
1627#endif
1628		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1629		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1630		/* Set the len */
1631		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1632		ph = mtod(op_err, struct sctp_paramhdr *);
1633		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
1634		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1635		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
1636		return (NULL);
1637	}
1638	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1639	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1640	    (asoc->peer_vtag == 0))) {
1641		/*
1642		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1643		 * should be ok, re-accept peer info
1644		 */
1645		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1646			/*
1647			 * Extension of case C. If we hit this, then the
1648			 * random number generator returned the same vtag
1649			 * when we first sent our INIT-ACK and when we later
1650			 * sent our INIT. The side with the seq numbers that
1651			 * are different will be the one that normnally
1652			 * would have hit case C. This in effect "extends"
1653			 * our vtags in this collision case to be 64 bits.
1654			 * The same collision could occur aka you get both
1655			 * vtag and seq number the same twice in a row.. but
1656			 * is much less likely. If it did happen then we
1657			 * would proceed through and bring up the assoc.. we
1658			 * may end up with the wrong stream setup however..
1659			 * which would be bad.. but there is no way to
1660			 * tell.. until we send on a stream that does not
1661			 * exist :-)
1662			 */
1663			if (how_indx < sizeof(asoc->cookie_how))
1664				asoc->cookie_how[how_indx] = 7;
1665
1666			return (NULL);
1667		}
1668		if (how_indx < sizeof(asoc->cookie_how))
1669			asoc->cookie_how[how_indx] = 8;
1670		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1671		sctp_stop_all_cookie_timers(stcb);
1672		/*
1673		 * since we did not send a HB make sure we don't double
1674		 * things
1675		 */
1676		net->hb_responded = 1;
1677		if (stcb->asoc.sctp_autoclose_ticks &&
1678		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1679			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1680			    NULL);
1681		}
1682		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1683		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1684
1685		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1686			/*
1687			 * Ok the peer probably discarded our data (if we
1688			 * echoed a cookie+data). So anything on the
1689			 * sent_queue should be marked for retransmit, we
1690			 * may not get something to kick us so it COULD
1691			 * still take a timeout to move these.. but it can't
1692			 * hurt to mark them.
1693			 */
1694			struct sctp_tmit_chunk *chk;
1695
1696			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1697				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1698					chk->sent = SCTP_DATAGRAM_RESEND;
1699					sctp_flight_size_decrease(chk);
1700					sctp_total_flight_decrease(stcb, chk);
1701					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1702					spec_flag++;
1703				}
1704			}
1705
1706		}
1707		/* process the INIT info (peer's info) */
1708		retval = sctp_process_init(init_cp, stcb, net);
1709		if (retval < 0) {
1710			if (how_indx < sizeof(asoc->cookie_how))
1711				asoc->cookie_how[how_indx] = 9;
1712			return (NULL);
1713		}
1714		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1715		    init_offset + sizeof(struct sctp_init_chunk),
1716		    initack_offset, sh, init_src)) {
1717			if (how_indx < sizeof(asoc->cookie_how))
1718				asoc->cookie_how[how_indx] = 10;
1719			return (NULL);
1720		}
1721		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1722		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1723			*notification = SCTP_NOTIFY_ASSOC_UP;
1724
1725			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1726			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1727			    (inp->sctp_socket->so_qlimit == 0)) {
1728#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1729				struct socket *so;
1730
1731#endif
1732				stcb->sctp_ep->sctp_flags |=
1733				    SCTP_PCB_FLAGS_CONNECTED;
1734#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1735				so = SCTP_INP_SO(stcb->sctp_ep);
1736				atomic_add_int(&stcb->asoc.refcnt, 1);
1737				SCTP_TCB_UNLOCK(stcb);
1738				SCTP_SOCKET_LOCK(so, 1);
1739				SCTP_TCB_LOCK(stcb);
1740				atomic_add_int(&stcb->asoc.refcnt, -1);
1741				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1742					SCTP_SOCKET_UNLOCK(so, 1);
1743					return (NULL);
1744				}
1745#endif
1746				soisconnected(stcb->sctp_socket);
1747#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1748				SCTP_SOCKET_UNLOCK(so, 1);
1749#endif
1750			}
1751			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1752				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1753			else
1754				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1755			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1756		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1757			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1758		} else {
1759			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1760		}
1761		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1762		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1763			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1764			    stcb->sctp_ep, stcb, asoc->primary_destination);
1765		}
1766		sctp_stop_all_cookie_timers(stcb);
1767		sctp_toss_old_cookies(stcb, asoc);
1768		sctp_send_cookie_ack(stcb);
1769		if (spec_flag) {
1770			/*
1771			 * only if we have retrans set do we do this. What
1772			 * this call does is get only the COOKIE-ACK out and
1773			 * then when we return the normal call to
1774			 * sctp_chunk_output will get the retrans out behind
1775			 * this.
1776			 */
1777			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1778		}
1779		if (how_indx < sizeof(asoc->cookie_how))
1780			asoc->cookie_how[how_indx] = 11;
1781
1782		return (stcb);
1783	}
1784	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1785	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1786	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1787	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1788	    cookie->tie_tag_peer_vtag != 0) {
1789		struct sctpasochead *head;
1790
1791		if (asoc->peer_supports_nat) {
1792			/*
1793			 * This is a gross gross hack. just call the
1794			 * cookie_new code since we are allowing a duplicate
1795			 * association. I hope this works...
1796			 */
1797			return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
1798			    inp, netp, init_src, notification,
1799			    auth_skipped, auth_offset, auth_len,
1800			    vrf_id, port));
1801		}
1802		/*
1803		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1804		 */
1805		/* temp code */
1806		if (how_indx < sizeof(asoc->cookie_how))
1807			asoc->cookie_how[how_indx] = 12;
1808		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1809		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1810
1811		*sac_assoc_id = sctp_get_associd(stcb);
1812		/* notify upper layer */
1813		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1814		atomic_add_int(&stcb->asoc.refcnt, 1);
1815		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1816		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1817		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1818			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1819		}
1820		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1821			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1822		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1823			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1824		}
1825		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1826			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1827			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1828			    stcb->sctp_ep, stcb, asoc->primary_destination);
1829
1830		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1831			/* move to OPEN state, if not in SHUTDOWN_SENT */
1832			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1833		}
1834		asoc->pre_open_streams =
1835		    ntohs(initack_cp->init.num_outbound_streams);
1836		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1837		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1838		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1839
1840		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1841
1842		asoc->str_reset_seq_in = asoc->init_seq_number;
1843
1844		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1845		if (asoc->mapping_array) {
1846			memset(asoc->mapping_array, 0,
1847			    asoc->mapping_array_size);
1848		}
1849		if (asoc->nr_mapping_array) {
1850			memset(asoc->nr_mapping_array, 0,
1851			    asoc->mapping_array_size);
1852		}
1853		SCTP_TCB_UNLOCK(stcb);
1854		SCTP_INP_INFO_WLOCK();
1855		SCTP_INP_WLOCK(stcb->sctp_ep);
1856		SCTP_TCB_LOCK(stcb);
1857		atomic_add_int(&stcb->asoc.refcnt, -1);
1858		/* send up all the data */
1859		SCTP_TCB_SEND_LOCK(stcb);
1860
1861		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1862		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1863			stcb->asoc.strmout[i].stream_no = i;
1864			stcb->asoc.strmout[i].next_sequence_sent = 0;
1865			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1866		}
1867		/* process the INIT-ACK info (my info) */
1868		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1869		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1870
1871		/* pull from vtag hash */
1872		LIST_REMOVE(stcb, sctp_asocs);
1873		/* re-insert to new vtag position */
1874		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1875		    SCTP_BASE_INFO(hashasocmark))];
1876		/*
1877		 * put it in the bucket in the vtag hash of assoc's for the
1878		 * system
1879		 */
1880		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1881
1882		/* process the INIT info (peer's info) */
1883		SCTP_TCB_SEND_UNLOCK(stcb);
1884		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1885		SCTP_INP_INFO_WUNLOCK();
1886
1887		retval = sctp_process_init(init_cp, stcb, net);
1888		if (retval < 0) {
1889			if (how_indx < sizeof(asoc->cookie_how))
1890				asoc->cookie_how[how_indx] = 13;
1891
1892			return (NULL);
1893		}
1894		/*
1895		 * since we did not send a HB make sure we don't double
1896		 * things
1897		 */
1898		net->hb_responded = 1;
1899
1900		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1901		    init_offset + sizeof(struct sctp_init_chunk),
1902		    initack_offset, sh, init_src)) {
1903			if (how_indx < sizeof(asoc->cookie_how))
1904				asoc->cookie_how[how_indx] = 14;
1905
1906			return (NULL);
1907		}
1908		/* respond with a COOKIE-ACK */
1909		sctp_stop_all_cookie_timers(stcb);
1910		sctp_toss_old_cookies(stcb, asoc);
1911		sctp_send_cookie_ack(stcb);
1912		if (how_indx < sizeof(asoc->cookie_how))
1913			asoc->cookie_how[how_indx] = 15;
1914
1915		return (stcb);
1916	}
1917	if (how_indx < sizeof(asoc->cookie_how))
1918		asoc->cookie_how[how_indx] = 16;
1919	/* all other cases... */
1920	return (NULL);
1921}
1922
1923
1924/*
1925 * handle a state cookie for a new association m: input packet mbuf chain--
1926 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1927 * and the cookie signature does not exist offset: offset into mbuf to the
1928 * cookie-echo chunk length: length of the cookie chunk to: where the init
1929 * was from returns a new TCB
1930 */
struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	/* NOTE(review): old_tag is assigned below but never read here */
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* cached socket pointer used by the lock juggling on error paths */
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		/* take a ref so the stcb stays valid across the teardown */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * NOTE(review): presumably the socket lock must be acquired
		 * with the TCB lock dropped (lock ordering) -- confirm
		 * against the locking design notes.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	old_tag = asoc->my_vtag;
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	/* all outbound sequence spaces start at the initial TSN */
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb, *netp);
	else
		retval = 0;
	if (retval < 0) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		/*
		 * NOTE(review): SCTP_LOC_16 is also used on the abort path
		 * above -- duplicate location code, intentional?
		 */
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m, iphlen,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown local address type in the cookie: dump the assoc */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		/* the socket may have been closed while no lock was held */
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		/* the cookie round trip provides an initial RTT sample */
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy,
		    SCTP_DETERMINE_LL_NOTOK);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}
2273
2274/*
2275 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
2276 * we NEED to make sure we are not already using the vtag. If so we
2277 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2278	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2279							    SCTP_BASE_INFO(hashasocmark))];
2280	LIST_FOREACH(stcb, head, sctp_asocs) {
2281	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
2282		       -- SEND ABORT - TRY AGAIN --
2283		}
2284	}
2285*/
2286
2287/*
2288 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2289 * existing (non-NULL) TCB
2290 */
2291static struct mbuf *
2292sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2293    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2294    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2295    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2296    struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
2297{
2298	struct sctp_state_cookie *cookie;
2299	struct sockaddr_in6 sin6;
2300	struct sockaddr_in sin;
2301	struct sctp_tcb *l_stcb = *stcb;
2302	struct sctp_inpcb *l_inp;
2303	struct sockaddr *to;
2304	sctp_assoc_t sac_restart_id;
2305	struct sctp_pcb *ep;
2306	struct mbuf *m_sig;
2307	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2308	uint8_t *sig;
2309	uint8_t cookie_ok = 0;
2310	unsigned int size_of_pkt, sig_offset, cookie_offset;
2311	unsigned int cookie_len;
2312	struct timeval now;
2313	struct timeval time_expires;
2314	struct sockaddr_storage dest_store;
2315	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2316	struct ip *iph;
2317	int notification = 0;
2318	struct sctp_nets *netl;
2319	int had_a_existing_tcb = 0;
2320	int send_int_conf = 0;
2321
2322	SCTPDBG(SCTP_DEBUG_INPUT2,
2323	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2324
2325	if (inp_p == NULL) {
2326		return (NULL);
2327	}
2328	/* First get the destination address setup too. */
2329	iph = mtod(m, struct ip *);
2330	switch (iph->ip_v) {
2331	case IPVERSION:
2332		{
2333			/* its IPv4 */
2334			struct sockaddr_in *lsin;
2335
2336			lsin = (struct sockaddr_in *)(localep_sa);
2337			memset(lsin, 0, sizeof(*lsin));
2338			lsin->sin_family = AF_INET;
2339			lsin->sin_len = sizeof(*lsin);
2340			lsin->sin_port = sh->dest_port;
2341			lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2342			size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2343			break;
2344		}
2345#ifdef INET6
2346	case IPV6_VERSION >> 4:
2347		{
2348			/* its IPv6 */
2349			struct ip6_hdr *ip6;
2350			struct sockaddr_in6 *lsin6;
2351
2352			lsin6 = (struct sockaddr_in6 *)(localep_sa);
2353			memset(lsin6, 0, sizeof(*lsin6));
2354			lsin6->sin6_family = AF_INET6;
2355			lsin6->sin6_len = sizeof(struct sockaddr_in6);
2356			ip6 = mtod(m, struct ip6_hdr *);
2357			lsin6->sin6_port = sh->dest_port;
2358			lsin6->sin6_addr = ip6->ip6_dst;
2359			size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2360			break;
2361		}
2362#endif
2363	default:
2364		return (NULL);
2365	}
2366
2367	cookie = &cp->cookie;
2368	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2369	cookie_len = ntohs(cp->ch.chunk_length);
2370
2371	if ((cookie->peerport != sh->src_port) &&
2372	    (cookie->myport != sh->dest_port) &&
2373	    (cookie->my_vtag != sh->v_tag)) {
2374		/*
2375		 * invalid ports or bad tag.  Note that we always leave the
2376		 * v_tag in the header in network order and when we stored
2377		 * it in the my_vtag slot we also left it in network order.
2378		 * This maintains the match even though it may be in the
2379		 * opposite byte order of the machine :->
2380		 */
2381		return (NULL);
2382	}
2383	if (cookie_len > size_of_pkt ||
2384	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2385	    sizeof(struct sctp_init_chunk) +
2386	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2387		/* cookie too long!  or too small */
2388		return (NULL);
2389	}
2390	/*
2391	 * split off the signature into its own mbuf (since it should not be
2392	 * calculated in the sctp_hmac_m() call).
2393	 */
2394	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2395	if (sig_offset > size_of_pkt) {
2396		/* packet not correct size! */
2397		/* XXX this may already be accounted for earlier... */
2398		return (NULL);
2399	}
2400	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2401	if (m_sig == NULL) {
2402		/* out of memory or ?? */
2403		return (NULL);
2404	}
2405#ifdef SCTP_MBUF_LOGGING
2406	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2407		struct mbuf *mat;
2408
2409		mat = m_sig;
2410		while (mat) {
2411			if (SCTP_BUF_IS_EXTENDED(mat)) {
2412				sctp_log_mb(mat, SCTP_MBUF_SPLIT);
2413			}
2414			mat = SCTP_BUF_NEXT(mat);
2415		}
2416	}
2417#endif
2418
2419	/*
2420	 * compute the signature/digest for the cookie
2421	 */
2422	ep = &(*inp_p)->sctp_ep;
2423	l_inp = *inp_p;
2424	if (l_stcb) {
2425		SCTP_TCB_UNLOCK(l_stcb);
2426	}
2427	SCTP_INP_RLOCK(l_inp);
2428	if (l_stcb) {
2429		SCTP_TCB_LOCK(l_stcb);
2430	}
2431	/* which cookie is it? */
2432	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2433	    (ep->current_secret_number != ep->last_secret_number)) {
2434		/* it's the old cookie */
2435		(void)sctp_hmac_m(SCTP_HMAC,
2436		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2437		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2438	} else {
2439		/* it's the current cookie */
2440		(void)sctp_hmac_m(SCTP_HMAC,
2441		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2442		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2443	}
2444	/* get the signature */
2445	SCTP_INP_RUNLOCK(l_inp);
2446	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2447	if (sig == NULL) {
2448		/* couldn't find signature */
2449		sctp_m_freem(m_sig);
2450		return (NULL);
2451	}
2452	/* compare the received digest with the computed digest */
2453	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2454		/* try the old cookie? */
2455		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2456		    (ep->current_secret_number != ep->last_secret_number)) {
2457			/* compute digest with old */
2458			(void)sctp_hmac_m(SCTP_HMAC,
2459			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2460			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2461			/* compare */
2462			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2463				cookie_ok = 1;
2464		}
2465	} else {
2466		cookie_ok = 1;
2467	}
2468
2469	/*
2470	 * Now before we continue we must reconstruct our mbuf so that
2471	 * normal processing of any other chunks will work.
2472	 */
2473	{
2474		struct mbuf *m_at;
2475
2476		m_at = m;
2477		while (SCTP_BUF_NEXT(m_at) != NULL) {
2478			m_at = SCTP_BUF_NEXT(m_at);
2479		}
2480		SCTP_BUF_NEXT(m_at) = m_sig;
2481	}
2482
2483	if (cookie_ok == 0) {
2484		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2485		SCTPDBG(SCTP_DEBUG_INPUT2,
2486		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2487		    (uint32_t) offset, cookie_offset, sig_offset);
2488		return (NULL);
2489	}
2490	/*
2491	 * check the cookie timestamps to be sure it's not stale
2492	 */
2493	(void)SCTP_GETTIME_TIMEVAL(&now);
2494	/* Expire time is in Ticks, so we convert to seconds */
2495	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2496	time_expires.tv_usec = cookie->time_entered.tv_usec;
2497	/*
2498	 * TODO sctp_constants.h needs alternative time macros when _KERNEL
2499	 * is undefined.
2500	 */
2501	if (timevalcmp(&now, &time_expires, >)) {
2502		/* cookie is stale! */
2503		struct mbuf *op_err;
2504		struct sctp_stale_cookie_msg *scm;
2505		uint32_t tim;
2506
2507		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2508		    0, M_DONTWAIT, 1, MT_DATA);
2509		if (op_err == NULL) {
2510			/* FOOBAR */
2511			return (NULL);
2512		}
2513		/* Set the len */
2514		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2515		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2516		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2517		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2518		    (sizeof(uint32_t))));
2519		/* seconds to usec */
2520		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2521		/* add in usec */
2522		if (tim == 0)
2523			tim = now.tv_usec - cookie->time_entered.tv_usec;
2524		scm->time_usec = htonl(tim);
2525		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2526		    vrf_id, port);
2527		return (NULL);
2528	}
2529	/*
2530	 * Now we must see with the lookup address if we have an existing
2531	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2532	 * and a INIT collided with us and somewhere the peer sent the
2533	 * cookie on another address besides the single address our assoc
2534	 * had for him. In this case we will have one of the tie-tags set at
2535	 * least AND the address field in the cookie can be used to look it
2536	 * up.
2537	 */
2538	to = NULL;
2539	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2540		memset(&sin6, 0, sizeof(sin6));
2541		sin6.sin6_family = AF_INET6;
2542		sin6.sin6_len = sizeof(sin6);
2543		sin6.sin6_port = sh->src_port;
2544		sin6.sin6_scope_id = cookie->scope_id;
2545		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2546		    sizeof(sin6.sin6_addr.s6_addr));
2547		to = (struct sockaddr *)&sin6;
2548	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2549		memset(&sin, 0, sizeof(sin));
2550		sin.sin_family = AF_INET;
2551		sin.sin_len = sizeof(sin);
2552		sin.sin_port = sh->src_port;
2553		sin.sin_addr.s_addr = cookie->address[0];
2554		to = (struct sockaddr *)&sin;
2555	} else {
2556		/* This should not happen */
2557		return (NULL);
2558	}
2559	if ((*stcb == NULL) && to) {
2560		/* Yep, lets check */
2561		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2562		if (*stcb == NULL) {
2563			/*
2564			 * We should have only got back the same inp. If we
2565			 * got back a different ep we have a problem. The
2566			 * original findep got back l_inp and now
2567			 */
2568			if (l_inp != *inp_p) {
2569				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2570			}
2571		} else {
2572			if (*locked_tcb == NULL) {
2573				/*
2574				 * In this case we found the assoc only
2575				 * after we locked the create lock. This
2576				 * means we are in a colliding case and we
2577				 * must make sure that we unlock the tcb if
2578				 * its one of the cases where we throw away
2579				 * the incoming packets.
2580				 */
2581				*locked_tcb = *stcb;
2582
2583				/*
2584				 * We must also increment the inp ref count
2585				 * since the ref_count flags was set when we
2586				 * did not find the TCB, now we found it
2587				 * which reduces the refcount.. we must
2588				 * raise it back out to balance it all :-)
2589				 */
2590				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2591				if ((*stcb)->sctp_ep != l_inp) {
2592					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2593					    (*stcb)->sctp_ep, l_inp);
2594				}
2595			}
2596		}
2597	}
2598	if (to == NULL) {
2599		return (NULL);
2600	}
2601	cookie_len -= SCTP_SIGNATURE_SIZE;
2602	if (*stcb == NULL) {
2603		/* this is the "normal" case... get a new TCB */
2604		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2605		    cookie_len, *inp_p, netp, to, &notification,
2606		    auth_skipped, auth_offset, auth_len, vrf_id, port);
2607	} else {
2608		/* this is abnormal... cookie-echo on existing TCB */
2609		had_a_existing_tcb = 1;
2610		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2611		    cookie, cookie_len, *inp_p, *stcb, netp, to,
2612		    &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
2613	}
2614
2615	if (*stcb == NULL) {
2616		/* still no TCB... must be bad cookie-echo */
2617		return (NULL);
2618	}
2619	/*
2620	 * Ok, we built an association so confirm the address we sent the
2621	 * INIT-ACK to.
2622	 */
2623	netl = sctp_findnet(*stcb, to);
2624	/*
2625	 * This code should in theory NOT run but
2626	 */
2627	if (netl == NULL) {
2628		/* TSNH! Huh, why do I need to add this address here? */
2629		int ret;
2630
2631		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2632		    SCTP_IN_COOKIE_PROC);
2633		netl = sctp_findnet(*stcb, to);
2634	}
2635	if (netl) {
2636		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2637			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2638			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2639			    netl);
2640			send_int_conf = 1;
2641		}
2642	}
2643	if (*stcb) {
2644		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2645		    *stcb, NULL);
2646	}
2647	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2648		if (!had_a_existing_tcb ||
2649		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2650			/*
2651			 * If we have a NEW cookie or the connect never
2652			 * reached the connected state during collision we
2653			 * must do the TCP accept thing.
2654			 */
2655			struct socket *so, *oso;
2656			struct sctp_inpcb *inp;
2657
2658			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2659				/*
2660				 * For a restart we will keep the same
2661				 * socket, no need to do anything. I THINK!!
2662				 */
2663				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2664				if (send_int_conf) {
2665					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2666					    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2667				}
2668				return (m);
2669			}
2670			oso = (*inp_p)->sctp_socket;
2671			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2672			SCTP_TCB_UNLOCK((*stcb));
2673			so = sonewconn(oso, 0
2674			    );
2675			SCTP_TCB_LOCK((*stcb));
2676			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2677
2678			if (so == NULL) {
2679				struct mbuf *op_err;
2680
2681#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2682				struct socket *pcb_so;
2683
2684#endif
2685				/* Too many sockets */
2686				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2687				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2688				sctp_abort_association(*inp_p, NULL, m, iphlen,
2689				    sh, op_err, vrf_id, port);
2690#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2691				pcb_so = SCTP_INP_SO(*inp_p);
2692				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2693				SCTP_TCB_UNLOCK((*stcb));
2694				SCTP_SOCKET_LOCK(pcb_so, 1);
2695				SCTP_TCB_LOCK((*stcb));
2696				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2697#endif
2698				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2699#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2700				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2701#endif
2702				return (NULL);
2703			}
2704			inp = (struct sctp_inpcb *)so->so_pcb;
2705			SCTP_INP_INCR_REF(inp);
2706			/*
2707			 * We add the unbound flag here so that if we get an
2708			 * soabort() before we get the move_pcb done, we
2709			 * will properly cleanup.
2710			 */
2711			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2712			    SCTP_PCB_FLAGS_CONNECTED |
2713			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2714			    SCTP_PCB_FLAGS_UNBOUND |
2715			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2716			    SCTP_PCB_FLAGS_DONT_WAKE);
2717			inp->sctp_features = (*inp_p)->sctp_features;
2718			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2719			inp->sctp_socket = so;
2720			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2721			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2722			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2723			inp->sctp_context = (*inp_p)->sctp_context;
2724			inp->inp_starting_point_for_iterator = NULL;
2725			/*
2726			 * copy in the authentication parameters from the
2727			 * original endpoint
2728			 */
2729			if (inp->sctp_ep.local_hmacs)
2730				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2731			inp->sctp_ep.local_hmacs =
2732			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2733			if (inp->sctp_ep.local_auth_chunks)
2734				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2735			inp->sctp_ep.local_auth_chunks =
2736			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2737
2738			/*
2739			 * Now we must move it from one hash table to
2740			 * another and get the tcb in the right place.
2741			 */
2742
2743			/*
2744			 * This is where the one-2-one socket is put into
2745			 * the accept state waiting for the accept!
2746			 */
2747			if (*stcb) {
2748				(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
2749			}
2750			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2751
2752			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2753			SCTP_TCB_UNLOCK((*stcb));
2754
2755			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
2756			    0);
2757			SCTP_TCB_LOCK((*stcb));
2758			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2759
2760
2761			/*
2762			 * now we must check to see if we were aborted while
2763			 * the move was going on and the lock/unlock
2764			 * happened.
2765			 */
2766			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2767				/*
2768				 * yep it was, we leave the assoc attached
2769				 * to the socket since the sctp_inpcb_free()
2770				 * call will send an abort for us.
2771				 */
2772				SCTP_INP_DECR_REF(inp);
2773				return (NULL);
2774			}
2775			SCTP_INP_DECR_REF(inp);
2776			/* Switch over to the new guy */
2777			*inp_p = inp;
2778			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2779			if (send_int_conf) {
2780				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2781				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2782			}
2783			/*
2784			 * Pull it from the incomplete queue and wake the
2785			 * guy
2786			 */
2787#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2788			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2789			SCTP_TCB_UNLOCK((*stcb));
2790			SCTP_SOCKET_LOCK(so, 1);
2791#endif
2792			soisconnected(so);
2793#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2794			SCTP_TCB_LOCK((*stcb));
2795			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2796			SCTP_SOCKET_UNLOCK(so, 1);
2797#endif
2798			return (m);
2799		}
2800	}
2801	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
2802		if (notification) {
2803			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2804		}
2805		if (send_int_conf) {
2806			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2807			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2808		}
2809	}
2810	return (m);
2811}
2812
/*
 * Process an incoming COOKIE-ACK chunk.  On the COOKIE_ECHOED -> OPEN
 * transition this stops the cookie timers, updates the RTO estimate,
 * notifies the ULP that the association is up, marks one-to-one style
 * sockets connected, and starts the heartbeat (plus optional
 * shutdown-guard and autoclose) timers.  It also sends any pending
 * ASCONFs, tosses the stored cookie and restarts the data
 * retransmission timer if data is still in flight.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/*
			 * Only take an RTT sample (from time_entered) when
			 * no errors have occurred on the association.
			 */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy,
			    SCTP_DETERMINE_LL_NOTOK);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Drop the TCB lock before taking the socket lock
			 * (lock ordering); hold a refcount so the assoc
			 * cannot be freed while unlocked, and re-check for
			 * a closed socket afterwards.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket went away while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
2913
/*
 * Process an incoming ECN-Echo (ECNE) chunk.  Locates the network the
 * echoed TSN was sent on, asks the pluggable congestion-control module
 * to react (a full reduction at most once per flight window), and
 * always answers with a CWR.  Both the current chunk layout and the
 * old (shorter) layout without num_pkts_since_cwr are accepted.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;	/* scratch copy for up-converting old-format chunks */
	uint8_t override_bit = 0;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		/* malformed length; ignore the chunk */
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/*
	 * window_data_tsn = last TSN currently queued for sending (or
	 * sending_seq - 1 when the send queue is empty); used below as the
	 * boundary past which a new cwnd reduction may happen again.
	 */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.TSN_seq;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
			/* queue is TSN-ordered; we have passed it, stop */
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a CWR was possibly lost.
		 * See how old it is, we may have it marked on the actual
		 * net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				goto out;
			}
		}
		/*
		 * If we reach here, we need to send a special CWR that says
		 * hey, we did this a long time ago and you lost the
		 * response.
		 */
		net = TAILQ_FIRST(&stcb->asoc.nets);
		override_bit = SCTP_CWR_REDUCE_OVERRIDE;
	}
out:
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		int ocwnd;

		ocwnd = net->cwnd;
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		/* still inside the current reduction window */
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how many
			 * marks/packets lost we have had.
			 */
			int cnt = 1;

			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are in-window yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
3030
3031static void
3032sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3033{
3034	/*
3035	 * Here we get a CWR from the peer. We must look in the outqueue and
3036	 * make sure that we have a covered ECNE in teh control chunk part.
3037	 * If so remove it.
3038	 */
3039	struct sctp_tmit_chunk *chk;
3040	struct sctp_ecne_chunk *ecne;
3041	int override;
3042	uint32_t cwr_tsn;
3043
3044	cwr_tsn = ntohl(cp->tsn);
3045
3046	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3047	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
3048		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3049			continue;
3050		}
3051		if ((override == 0) && (chk->whoTo != net)) {
3052			/* Must be from the right src unless override is set */
3053			continue;
3054		}
3055		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3056		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3057			/* this covers this ECNE, we can remove it */
3058			stcb->asoc.ecn_echo_cnt_onq--;
3059			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3060			    sctp_next);
3061			if (chk->data) {
3062				sctp_m_freem(chk->data);
3063				chk->data = NULL;
3064			}
3065			stcb->asoc.ctrl_queue_cnt--;
3066			sctp_free_a_chunk(stcb, chk);
3067			if (override == 0) {
3068				break;
3069			}
3070		}
3071	}
3072}
3073
/*
 * Process an incoming SHUTDOWN-COMPLETE chunk.  Only acted on while
 * we are in SHUTDOWN_ACK_SENT; otherwise it is ignored (the early
 * path drops the TCB lock explicitly).  Notifies the ULP, reports any
 * unexpectedly-outstanding data, stops the shutdown-ack timer and
 * frees the association.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Socket lock must be taken before the TCB lock: drop/reacquire
	 * with a refcount held so the assoc stays valid while unlocked.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3129
/*
 * Handle one chunk reported dropped by a PACKET-DROPPED report.
 * Depending on the dropped chunk's type this marks data for fast
 * retransmit or re-sends the matching control chunk.  Returns 0 on
 * success; returns -1 only when the data bytes echoed in the report
 * do not match what we actually sent (corrupt/bogus report).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/*
			 * First pass assumes the sent queue is TSN ordered
			 * and stops early once we pass the target.
			 */
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				/* TSN not on the sent queue at all */
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					/*
					 * Neither a CRC problem nor a router
					 * report: nothing to act on.
					 */
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the echoed payload bytes against
				 * our copy of the DATA chunk before trusting
				 * the report.
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/* audit code */
				unsigned int audit;

				/*
				 * Recount RESEND-marked chunks and correct
				 * sent_queue_retran_cnt if it drifted.
				 */
				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* re-mark the queued ASCONF (if any) for resend */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			/* re-mark the queued COOKIE-ECHO (if any) for resend */
			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3364
3365void
3366sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3367{
3368	int i;
3369	uint16_t temp;
3370
3371	/*
3372	 * We set things to 0xffff since this is the last delivered sequence
3373	 * and we will be sending in 0 after the reset.
3374	 */
3375
3376	if (number_entries) {
3377		for (i = 0; i < number_entries; i++) {
3378			temp = ntohs(list[i]);
3379			if (temp >= stcb->asoc.streamincnt) {
3380				continue;
3381			}
3382			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3383		}
3384	} else {
3385		list = NULL;
3386		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3387			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3388		}
3389	}
3390	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3391}
3392
3393static void
3394sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3395{
3396	int i;
3397
3398	if (number_entries == 0) {
3399		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3400			stcb->asoc.strmout[i].next_sequence_sent = 0;
3401		}
3402	} else if (number_entries) {
3403		for (i = 0; i < number_entries; i++) {
3404			uint16_t temp;
3405
3406			temp = ntohs(list[i]);
3407			if (temp >= stcb->asoc.streamoutcnt) {
3408				/* no such stream */
3409				continue;
3410			}
3411			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3412		}
3413	}
3414	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3415}
3416
3417
3418struct sctp_stream_reset_out_request *
3419sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3420{
3421	struct sctp_association *asoc;
3422	struct sctp_stream_reset_out_req *req;
3423	struct sctp_stream_reset_out_request *r;
3424	struct sctp_tmit_chunk *chk;
3425	int len, clen;
3426
3427	asoc = &stcb->asoc;
3428	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3429		asoc->stream_reset_outstanding = 0;
3430		return (NULL);
3431	}
3432	if (stcb->asoc.str_reset == NULL) {
3433		asoc->stream_reset_outstanding = 0;
3434		return (NULL);
3435	}
3436	chk = stcb->asoc.str_reset;
3437	if (chk->data == NULL) {
3438		return (NULL);
3439	}
3440	if (bchk) {
3441		/* he wants a copy of the chk pointer */
3442		*bchk = chk;
3443	}
3444	clen = chk->send_size;
3445	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
3446	r = &req->sr_req;
3447	if (ntohl(r->request_seq) == seq) {
3448		/* found it */
3449		return (r);
3450	}
3451	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3452	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3453		/* move to the next one, there can only be a max of two */
3454		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3455		if (ntohl(r->request_seq) == seq) {
3456			return (r);
3457		}
3458	}
3459	/* that seq is not here */
3460	return (NULL);
3461}
3462
3463static void
3464sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3465{
3466	struct sctp_association *asoc;
3467	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3468
3469	if (stcb->asoc.str_reset == NULL) {
3470		return;
3471	}
3472	asoc = &stcb->asoc;
3473
3474	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3475	TAILQ_REMOVE(&asoc->control_send_queue,
3476	    chk,
3477	    sctp_next);
3478	if (chk->data) {
3479		sctp_m_freem(chk->data);
3480		chk->data = NULL;
3481	}
3482	asoc->ctrl_queue_cnt--;
3483	sctp_free_a_chunk(stcb, chk);
3484	/* sa_ignore NO_NULL_CHK */
3485	stcb->asoc.str_reset = NULL;
3486}
3487
3488
/*
 * Process the peer's response to one of our outstanding stream-reset
 * requests.  'seq' must match our current str_reset_seq_out or the
 * response is ignored.  Handles OUT-reset, IN-reset, ADD-STREAMS and
 * TSN-reset request types: performs the reset and/or notifies the
 * ULP, and frees the pending request chunk once nothing remains
 * outstanding.  Returns 1 only when processing the synthetic
 * forward-TSN aborted the association; otherwise 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				/*
				 * The trailing uint16_t's of the param are
				 * the list of affected streams.
				 */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					/* only failure is reported to the ULP here */
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_STREAMS) {
				/* Ok we now may have more streams */
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* Put the new streams into effect */
					stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize;
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * Feed ourselves a synthetic
					 * FORWARD-TSN up to one before the
					 * peer's next TSN to advance
					 * delivery state.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* rebase the inbound TSN state and clear both mapping arrays */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					/* adopt the outbound TSN the peer expects next */
					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}
3594
3595static void
3596sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3597    struct sctp_tmit_chunk *chk,
3598    struct sctp_stream_reset_in_request *req, int trunc)
3599{
3600	uint32_t seq;
3601	int len, i;
3602	int number_entries;
3603	uint16_t temp;
3604
3605	/*
3606	 * peer wants me to send a str-reset to him for my outgoing seq's if
3607	 * seq_in is right.
3608	 */
3609	struct sctp_association *asoc = &stcb->asoc;
3610
3611	seq = ntohl(req->request_seq);
3612	if (asoc->str_reset_seq_in == seq) {
3613		if (trunc) {
3614			/* Can't do it, since they exceeded our buffer size  */
3615			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3616			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3617			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3618		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3619			len = ntohs(req->ph.param_length);
3620			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3621			for (i = 0; i < number_entries; i++) {
3622				temp = ntohs(req->list_of_streams[i]);
3623				req->list_of_streams[i] = temp;
3624			}
3625			/* move the reset action back one */
3626			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3627			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3628			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
3629			    asoc->str_reset_seq_out,
3630			    seq, (asoc->sending_seq - 1));
3631			asoc->stream_reset_out_is_outstanding = 1;
3632			asoc->str_reset = chk;
3633			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
3634			stcb->asoc.stream_reset_outstanding++;
3635		} else {
3636			/* Can't do it, since we have sent one out */
3637			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3638			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
3639			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3640		}
3641		asoc->str_reset_seq_in++;
3642	} else if (asoc->str_reset_seq_in - 1 == seq) {
3643		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3644	} else if (asoc->str_reset_seq_in - 2 == seq) {
3645		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3646	} else {
3647		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3648	}
3649}
3650
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	/*
	 * Returns 1 if processing the synthetic FWD-TSN aborted the
	 * association (caller must stop touching stcb), otherwise 0.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * In-sequence request: build a local FWD-TSN covering
		 * everything currently mapped so any partially delivered
		 * data is flushed before the TSN/stream state is reset.
		 */
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			return (1);
		}
		/* Jump the receive window forward and clear both maps. */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		/* Bump our next sending TSN as well (part C). */
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		/* Reset every stream sequence number in both directions. */
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission: echo the saved result and TSN state. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Two behind: echo the older saved state. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}
3719
3720static void
3721sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3722    struct sctp_tmit_chunk *chk,
3723    struct sctp_stream_reset_out_request *req, int trunc)
3724{
3725	uint32_t seq, tsn;
3726	int number_entries, len;
3727	struct sctp_association *asoc = &stcb->asoc;
3728
3729	seq = ntohl(req->request_seq);
3730
3731	/* now if its not a duplicate we process it */
3732	if (asoc->str_reset_seq_in == seq) {
3733		len = ntohs(req->ph.param_length);
3734		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3735		/*
3736		 * the sender is resetting, handle the list issue.. we must
3737		 * a) verify if we can do the reset, if so no problem b) If
3738		 * we can't do the reset we must copy the request. c) queue
3739		 * it, and setup the data in processor to trigger it off
3740		 * when needed and dequeue all the queued data.
3741		 */
3742		tsn = ntohl(req->send_reset_at_tsn);
3743
3744		/* move the reset action back one */
3745		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3746		if (trunc) {
3747			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3748			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3749		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
3750			/* we can do it now */
3751			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3752			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3753			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3754		} else {
3755			/*
3756			 * we must queue it up and thus wait for the TSN's
3757			 * to arrive that are at or before tsn
3758			 */
3759			struct sctp_stream_reset_list *liste;
3760			int siz;
3761
3762			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3763			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3764			    siz, SCTP_M_STRESET);
3765			if (liste == NULL) {
3766				/* gak out of memory */
3767				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3768				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3769				return;
3770			}
3771			liste->tsn = tsn;
3772			liste->number_entries = number_entries;
3773			memcpy(&liste->req, req,
3774			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3775			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3776			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3777			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3778		}
3779		asoc->str_reset_seq_in++;
3780	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3781		/*
3782		 * one seq back, just echo back last action since my
3783		 * response was lost.
3784		 */
3785		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3786	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3787		/*
3788		 * two seq back, just echo back last action since my
3789		 * response was lost.
3790		 */
3791		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3792	} else {
3793		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3794	}
3795}
3796
3797static void
3798sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
3799    struct sctp_stream_reset_add_strm *str_add)
3800{
3801	/*
3802	 * Peer is requesting to add more streams. If its within our
3803	 * max-streams we will allow it.
3804	 */
3805	uint16_t num_stream, i;
3806	uint32_t seq;
3807	struct sctp_association *asoc = &stcb->asoc;
3808	struct sctp_queued_to_read *ctl, *nctl;
3809
3810	/* Get the number. */
3811	seq = ntohl(str_add->request_seq);
3812	num_stream = ntohs(str_add->number_of_streams);
3813	/* Now what would be the new total? */
3814	if (asoc->str_reset_seq_in == seq) {
3815		num_stream += stcb->asoc.streamincnt;
3816		if (num_stream > stcb->asoc.max_inbound_streams) {
3817			/* We must reject it they ask for to many */
3818	denied:
3819			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3820			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3821			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3822		} else {
3823			/* Ok, we can do that :-) */
3824			struct sctp_stream_in *oldstrm;
3825
3826			/* save off the old */
3827			oldstrm = stcb->asoc.strmin;
3828			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
3829			    (num_stream * sizeof(struct sctp_stream_in)),
3830			    SCTP_M_STRMI);
3831			if (stcb->asoc.strmin == NULL) {
3832				stcb->asoc.strmin = oldstrm;
3833				goto denied;
3834			}
3835			/* copy off the old data */
3836			for (i = 0; i < stcb->asoc.streamincnt; i++) {
3837				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3838				stcb->asoc.strmin[i].stream_no = i;
3839				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
3840				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
3841				/* now anything on those queues? */
3842				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
3843					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
3844					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
3845				}
3846			}
3847			/* Init the new streams */
3848			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
3849				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
3850				stcb->asoc.strmin[i].stream_no = i;
3851				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3852				stcb->asoc.strmin[i].delivery_started = 0;
3853			}
3854			SCTP_FREE(oldstrm, SCTP_M_STRMI);
3855			/* update the size */
3856			stcb->asoc.streamincnt = num_stream;
3857			/* Send the ack */
3858			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3859			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3860			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3861			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb,
3862			    (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED);
3863		}
3864	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3865		/*
3866		 * one seq back, just echo back last action since my
3867		 * response was lost.
3868		 */
3869		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3870	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3871		/*
3872		 * two seq back, just echo back last action since my
3873		 * response was lost.
3874		 */
3875		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3876	} else {
3877		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3878
3879	}
3880}
3881
/*
 * Parse a STREAM-RESET chunk: walk each request/response parameter,
 * dispatch to the appropriate handler, and build a single response
 * chunk which is queued on the association's control send queue.
 * Returns non-zero when the association was aborted during processing
 * (caller must stop all further work on stcb).
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
	static int
	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
        struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	/* contiguous scratch copy of each parameter (bounded size) */
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];

	uint32_t seq;
	int num_req = 0;	/* request params we generated a reply for */
	int trunc = 0;		/* was the current param bigger than cstore? */
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/* common bail-out: free the response chunk and give up */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = stcb->asoc.primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* walk every parameter contained in the incoming chunk */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		/* first peek at just the parameter header */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* pull up to cstore's worth of the param contiguously */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > (int)sizeof(cstore)) {
			trunc = 1;
		} else {
			trunc = 0;
		}

		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;

			req_in = (struct sctp_stream_reset_in_request *)ph;

			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;

			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* association aborted during FWD-TSN handling */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				/* association aborted during response handling */
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* unknown parameter type - stop parsing */
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4030
4031/*
4032 * Handle a router or endpoints report of a packet loss, there are two ways
4033 * to handle this, either we get the whole packet and must disect it
4034 * ourselves (possibly with truncation and or corruption) or it is a summary
4035 * from a middle box that did the disectting for us.
4036 */
4037static void
4038sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4039    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4040{
4041	uint32_t bottle_bw, on_queue;
4042	uint16_t trunc_len;
4043	unsigned int chlen;
4044	unsigned int at;
4045	struct sctp_chunk_desc desc;
4046	struct sctp_chunkhdr *ch;
4047
4048	chlen = ntohs(cp->ch.chunk_length);
4049	chlen -= sizeof(struct sctp_pktdrop_chunk);
4050	/* XXX possible chlen underflow */
4051	if (chlen == 0) {
4052		ch = NULL;
4053		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4054			SCTP_STAT_INCR(sctps_pdrpbwrpt);
4055	} else {
4056		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4057		chlen -= sizeof(struct sctphdr);
4058		/* XXX possible chlen underflow */
4059		memset(&desc, 0, sizeof(desc));
4060	}
4061	trunc_len = (uint16_t) ntohs(cp->trunc_len);
4062	if (trunc_len > limit) {
4063		trunc_len = limit;
4064	}
4065	/* now the chunks themselves */
4066	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4067		desc.chunk_type = ch->chunk_type;
4068		/* get amount we need to move */
4069		at = ntohs(ch->chunk_length);
4070		if (at < sizeof(struct sctp_chunkhdr)) {
4071			/* corrupt chunk, maybe at the end? */
4072			SCTP_STAT_INCR(sctps_pdrpcrupt);
4073			break;
4074		}
4075		if (trunc_len == 0) {
4076			/* we are supposed to have all of it */
4077			if (at > chlen) {
4078				/* corrupt skip it */
4079				SCTP_STAT_INCR(sctps_pdrpcrupt);
4080				break;
4081			}
4082		} else {
4083			/* is there enough of it left ? */
4084			if (desc.chunk_type == SCTP_DATA) {
4085				if (chlen < (sizeof(struct sctp_data_chunk) +
4086				    sizeof(desc.data_bytes))) {
4087					break;
4088				}
4089			} else {
4090				if (chlen < sizeof(struct sctp_chunkhdr)) {
4091					break;
4092				}
4093			}
4094		}
4095		if (desc.chunk_type == SCTP_DATA) {
4096			/* can we get out the tsn? */
4097			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4098				SCTP_STAT_INCR(sctps_pdrpmbda);
4099
4100			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4101				/* yep */
4102				struct sctp_data_chunk *dcp;
4103				uint8_t *ddp;
4104				unsigned int iii;
4105
4106				dcp = (struct sctp_data_chunk *)ch;
4107				ddp = (uint8_t *) (dcp + 1);
4108				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4109					desc.data_bytes[iii] = ddp[iii];
4110				}
4111				desc.tsn_ifany = dcp->dp.tsn;
4112			} else {
4113				/* nope we are done. */
4114				SCTP_STAT_INCR(sctps_pdrpnedat);
4115				break;
4116			}
4117		} else {
4118			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4119				SCTP_STAT_INCR(sctps_pdrpmbct);
4120		}
4121
4122		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4123			SCTP_STAT_INCR(sctps_pdrppdbrk);
4124			break;
4125		}
4126		if (SCTP_SIZE32(at) > chlen) {
4127			break;
4128		}
4129		chlen -= SCTP_SIZE32(at);
4130		if (chlen < sizeof(struct sctp_chunkhdr)) {
4131			/* done, none left */
4132			break;
4133		}
4134		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4135	}
4136	/* Now update any rwnd --- possibly */
4137	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4138		/* From a peer, we get a rwnd report */
4139		uint32_t a_rwnd;
4140
4141		SCTP_STAT_INCR(sctps_pdrpfehos);
4142
4143		bottle_bw = ntohl(cp->bottle_bw);
4144		on_queue = ntohl(cp->current_onq);
4145		if (bottle_bw && on_queue) {
4146			/* a rwnd report is in here */
4147			if (bottle_bw > on_queue)
4148				a_rwnd = bottle_bw - on_queue;
4149			else
4150				a_rwnd = 0;
4151
4152			if (a_rwnd == 0)
4153				stcb->asoc.peers_rwnd = 0;
4154			else {
4155				if (a_rwnd > stcb->asoc.total_flight) {
4156					stcb->asoc.peers_rwnd =
4157					    a_rwnd - stcb->asoc.total_flight;
4158				} else {
4159					stcb->asoc.peers_rwnd = 0;
4160				}
4161				if (stcb->asoc.peers_rwnd <
4162				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4163					/* SWS sender side engages */
4164					stcb->asoc.peers_rwnd = 0;
4165				}
4166			}
4167		}
4168	} else {
4169		SCTP_STAT_INCR(sctps_pdrpfmbox);
4170	}
4171
4172	/* now middle boxes in sat networks get a cwnd bump */
4173	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4174	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
4175	    (stcb->asoc.sat_network)) {
4176		/*
4177		 * This is debateable but for sat networks it makes sense
4178		 * Note if a T3 timer has went off, we will prohibit any
4179		 * changes to cwnd until we exit the t3 loss recovery.
4180		 */
4181		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4182		    net, cp, &bottle_bw, &on_queue);
4183	}
4184}
4185
4186/*
4187 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4188 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4189 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4190 * length of the complete packet outputs: - length: modified to remaining
4191 * length after control processing - netp: modified to new sctp_nets after
4192 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4193 * bad packet,...) otherwise return the tcb for this packet
4194 */
4195#ifdef __GNUC__
4196__attribute__((noinline))
4197#endif
4198	static struct sctp_tcb *
4199	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4200             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4201             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4202             uint32_t vrf_id, uint16_t port)
4203{
4204	struct sctp_association *asoc;
4205	uint32_t vtag_in;
4206	int num_chunks = 0;	/* number of control chunks processed */
4207	uint32_t chk_length;
4208	int ret;
4209	int abort_no_unlock = 0;
4210	int ecne_seen = 0;
4211
4212	/*
4213	 * How big should this be, and should it be alloc'd? Lets try the
4214	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4215	 * until we get into jumbo grams and such..
4216	 */
4217	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4218	struct sctp_tcb *locked_tcb = stcb;
4219	int got_auth = 0;
4220	uint32_t auth_offset = 0, auth_len = 0;
4221	int auth_skipped = 0;
4222	int asconf_cnt = 0;
4223
4224#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4225	struct socket *so;
4226
4227#endif
4228
4229	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4230	    iphlen, *offset, length, stcb);
4231
4232	/* validate chunk header length... */
4233	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4234		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4235		    ntohs(ch->chunk_length));
4236		if (locked_tcb) {
4237			SCTP_TCB_UNLOCK(locked_tcb);
4238		}
4239		return (NULL);
4240	}
4241	/*
4242	 * validate the verification tag
4243	 */
4244	vtag_in = ntohl(sh->v_tag);
4245
4246	if (locked_tcb) {
4247		SCTP_TCB_LOCK_ASSERT(locked_tcb);
4248	}
4249	if (ch->chunk_type == SCTP_INITIATION) {
4250		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4251		    ntohs(ch->chunk_length), vtag_in);
4252		if (vtag_in != 0) {
4253			/* protocol error- silently discard... */
4254			SCTP_STAT_INCR(sctps_badvtag);
4255			if (locked_tcb) {
4256				SCTP_TCB_UNLOCK(locked_tcb);
4257			}
4258			return (NULL);
4259		}
4260	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4261		/*
4262		 * If there is no stcb, skip the AUTH chunk and process
4263		 * later after a stcb is found (to validate the lookup was
4264		 * valid.
4265		 */
4266		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4267		    (stcb == NULL) &&
4268		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4269			/* save this chunk for later processing */
4270			auth_skipped = 1;
4271			auth_offset = *offset;
4272			auth_len = ntohs(ch->chunk_length);
4273
4274			/* (temporarily) move past this chunk */
4275			*offset += SCTP_SIZE32(auth_len);
4276			if (*offset >= length) {
4277				/* no more data left in the mbuf chain */
4278				*offset = length;
4279				if (locked_tcb) {
4280					SCTP_TCB_UNLOCK(locked_tcb);
4281				}
4282				return (NULL);
4283			}
4284			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4285			    sizeof(struct sctp_chunkhdr), chunk_buf);
4286		}
4287		if (ch == NULL) {
4288			/* Help */
4289			*offset = length;
4290			if (locked_tcb) {
4291				SCTP_TCB_UNLOCK(locked_tcb);
4292			}
4293			return (NULL);
4294		}
4295		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4296			goto process_control_chunks;
4297		}
4298		/*
4299		 * first check if it's an ASCONF with an unknown src addr we
4300		 * need to look inside to find the association
4301		 */
4302		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4303			struct sctp_chunkhdr *asconf_ch = ch;
4304			uint32_t asconf_offset = 0, asconf_len = 0;
4305
4306			/* inp's refcount may be reduced */
4307			SCTP_INP_INCR_REF(inp);
4308
4309			asconf_offset = *offset;
4310			do {
4311				asconf_len = ntohs(asconf_ch->chunk_length);
4312				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4313					break;
4314				stcb = sctp_findassociation_ep_asconf(m, iphlen,
4315				    *offset, sh, &inp, netp, vrf_id);
4316				if (stcb != NULL)
4317					break;
4318				asconf_offset += SCTP_SIZE32(asconf_len);
4319				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4320				    sizeof(struct sctp_chunkhdr), chunk_buf);
4321			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4322			if (stcb == NULL) {
4323				/*
4324				 * reduce inp's refcount if not reduced in
4325				 * sctp_findassociation_ep_asconf().
4326				 */
4327				SCTP_INP_DECR_REF(inp);
4328			} else {
4329				locked_tcb = stcb;
4330			}
4331
4332			/* now go back and verify any auth chunk to be sure */
4333			if (auth_skipped && (stcb != NULL)) {
4334				struct sctp_auth_chunk *auth;
4335
4336				auth = (struct sctp_auth_chunk *)
4337				    sctp_m_getptr(m, auth_offset,
4338				    auth_len, chunk_buf);
4339				got_auth = 1;
4340				auth_skipped = 0;
4341				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4342				    auth_offset)) {
4343					/* auth HMAC failed so dump it */
4344					*offset = length;
4345					if (locked_tcb) {
4346						SCTP_TCB_UNLOCK(locked_tcb);
4347					}
4348					return (NULL);
4349				} else {
4350					/* remaining chunks are HMAC checked */
4351					stcb->asoc.authenticated = 1;
4352				}
4353			}
4354		}
4355		if (stcb == NULL) {
4356			/* no association, so it's out of the blue... */
4357			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
4358			    vrf_id, port);
4359			*offset = length;
4360			if (locked_tcb) {
4361				SCTP_TCB_UNLOCK(locked_tcb);
4362			}
4363			return (NULL);
4364		}
4365		asoc = &stcb->asoc;
4366		/* ABORT and SHUTDOWN can use either v_tag... */
4367		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4368		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4369		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4370			if ((vtag_in == asoc->my_vtag) ||
4371			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
4372			    (vtag_in == asoc->peer_vtag))) {
4373				/* this is valid */
4374			} else {
4375				/* drop this packet... */
4376				SCTP_STAT_INCR(sctps_badvtag);
4377				if (locked_tcb) {
4378					SCTP_TCB_UNLOCK(locked_tcb);
4379				}
4380				return (NULL);
4381			}
4382		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4383			if (vtag_in != asoc->my_vtag) {
4384				/*
4385				 * this could be a stale SHUTDOWN-ACK or the
4386				 * peer never got the SHUTDOWN-COMPLETE and
4387				 * is still hung; we have started a new asoc
4388				 * but it won't complete until the shutdown
4389				 * is completed
4390				 */
4391				if (locked_tcb) {
4392					SCTP_TCB_UNLOCK(locked_tcb);
4393				}
4394				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
4395				    NULL, vrf_id, port);
4396				return (NULL);
4397			}
4398		} else {
4399			/* for all other chunks, vtag must match */
4400			if (vtag_in != asoc->my_vtag) {
4401				/* invalid vtag... */
4402				SCTPDBG(SCTP_DEBUG_INPUT3,
4403				    "invalid vtag: %xh, expect %xh\n",
4404				    vtag_in, asoc->my_vtag);
4405				SCTP_STAT_INCR(sctps_badvtag);
4406				if (locked_tcb) {
4407					SCTP_TCB_UNLOCK(locked_tcb);
4408				}
4409				*offset = length;
4410				return (NULL);
4411			}
4412		}
4413	}			/* end if !SCTP_COOKIE_ECHO */
4414	/*
4415	 * process all control chunks...
4416	 */
4417	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4418	/* EY */
4419	    (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4420	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4421	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4422		/* implied cookie-ack.. we must have lost the ack */
4423		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4424			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4425			    stcb->asoc.overall_error_count,
4426			    0,
4427			    SCTP_FROM_SCTP_INPUT,
4428			    __LINE__);
4429		}
4430		stcb->asoc.overall_error_count = 0;
4431		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4432		    *netp);
4433	}
4434process_control_chunks:
4435	while (IS_SCTP_CONTROL(ch)) {
4436		/* validate chunk length */
4437		chk_length = ntohs(ch->chunk_length);
4438		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4439		    ch->chunk_type, chk_length);
4440		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4441		if (chk_length < sizeof(*ch) ||
4442		    (*offset + (int)chk_length) > length) {
4443			*offset = length;
4444			if (locked_tcb) {
4445				SCTP_TCB_UNLOCK(locked_tcb);
4446			}
4447			return (NULL);
4448		}
4449		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4450		/*
4451		 * INIT-ACK only gets the init ack "header" portion only
4452		 * because we don't have to process the peer's COOKIE. All
4453		 * others get a complete chunk.
4454		 */
4455		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4456		    (ch->chunk_type == SCTP_INITIATION)) {
4457			/* get an init-ack chunk */
4458			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4459			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4460			if (ch == NULL) {
4461				*offset = length;
4462				if (locked_tcb) {
4463					SCTP_TCB_UNLOCK(locked_tcb);
4464				}
4465				return (NULL);
4466			}
4467		} else {
4468			/* For cookies and all other chunks. */
4469			if (chk_length > sizeof(chunk_buf)) {
4470				/*
4471				 * use just the size of the chunk buffer so
4472				 * the front part of our chunks fit in
4473				 * contiguous space up to the chunk buffer
4474				 * size (508 bytes). For chunks that need to
4475				 * get more than that they must use the
4476				 * sctp_m_getptr() function or other means
4477				 * (e.g. know how to parse mbuf chains).
4478				 * Cookies do this already.
4479				 */
4480				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4481				    (sizeof(chunk_buf) - 4),
4482				    chunk_buf);
4483				if (ch == NULL) {
4484					*offset = length;
4485					if (locked_tcb) {
4486						SCTP_TCB_UNLOCK(locked_tcb);
4487					}
4488					return (NULL);
4489				}
4490			} else {
4491				/* We can fit it all */
4492				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4493				    chk_length, chunk_buf);
4494				if (ch == NULL) {
4495					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4496					*offset = length;
4497					if (locked_tcb) {
4498						SCTP_TCB_UNLOCK(locked_tcb);
4499					}
4500					return (NULL);
4501				}
4502			}
4503		}
4504		num_chunks++;
4505		/* Save off the last place we got a control from */
4506		if (stcb != NULL) {
4507			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4508				/*
4509				 * allow last_control to be NULL if
4510				 * ASCONF... ASCONF processing will find the
4511				 * right net later
4512				 */
4513				if ((netp != NULL) && (*netp != NULL))
4514					stcb->asoc.last_control_chunk_from = *netp;
4515			}
4516		}
4517#ifdef SCTP_AUDITING_ENABLED
4518		sctp_audit_log(0xB0, ch->chunk_type);
4519#endif
4520
4521		/* check to see if this chunk required auth, but isn't */
4522		if ((stcb != NULL) &&
4523		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
4524		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
4525		    !stcb->asoc.authenticated) {
4526			/* "silently" ignore */
4527			SCTP_STAT_INCR(sctps_recvauthmissing);
4528			goto next_chunk;
4529		}
4530		switch (ch->chunk_type) {
4531		case SCTP_INITIATION:
4532			/* must be first and only chunk */
4533			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4534			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4535				/* We are not interested anymore? */
4536				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4537					/*
4538					 * collision case where we are
4539					 * sending to them too
4540					 */
4541					;
4542				} else {
4543					if (locked_tcb) {
4544						SCTP_TCB_UNLOCK(locked_tcb);
4545					}
4546					*offset = length;
4547					return (NULL);
4548				}
4549			}
4550			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4551			    (num_chunks > 1) ||
4552			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4553				*offset = length;
4554				if (locked_tcb) {
4555					SCTP_TCB_UNLOCK(locked_tcb);
4556				}
4557				return (NULL);
4558			}
4559			if ((stcb != NULL) &&
4560			    (SCTP_GET_STATE(&stcb->asoc) ==
4561			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4562				sctp_send_shutdown_ack(stcb,
4563				    stcb->asoc.primary_destination);
4564				*offset = length;
4565				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4566				if (locked_tcb) {
4567					SCTP_TCB_UNLOCK(locked_tcb);
4568				}
4569				return (NULL);
4570			}
4571			if (netp) {
4572				sctp_handle_init(m, iphlen, *offset, sh,
4573				    (struct sctp_init_chunk *)ch, inp,
4574				    stcb, *netp, &abort_no_unlock, vrf_id, port);
4575			}
4576			if (abort_no_unlock)
4577				return (NULL);
4578
4579			*offset = length;
4580			if (locked_tcb) {
4581				SCTP_TCB_UNLOCK(locked_tcb);
4582			}
4583			return (NULL);
4584			break;
4585		case SCTP_PAD_CHUNK:
4586			break;
4587		case SCTP_INITIATION_ACK:
4588			/* must be first and only chunk */
4589			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4590			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4591				/* We are not interested anymore */
4592				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4593					;
4594				} else {
4595					if (locked_tcb != stcb) {
4596						/* Very unlikely */
4597						SCTP_TCB_UNLOCK(locked_tcb);
4598					}
4599					*offset = length;
4600					if (stcb) {
4601#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4602						so = SCTP_INP_SO(inp);
4603						atomic_add_int(&stcb->asoc.refcnt, 1);
4604						SCTP_TCB_UNLOCK(stcb);
4605						SCTP_SOCKET_LOCK(so, 1);
4606						SCTP_TCB_LOCK(stcb);
4607						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4608#endif
4609						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4610#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4611						SCTP_SOCKET_UNLOCK(so, 1);
4612#endif
4613					}
4614					return (NULL);
4615				}
4616			}
4617			if ((num_chunks > 1) ||
4618			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4619				*offset = length;
4620				if (locked_tcb) {
4621					SCTP_TCB_UNLOCK(locked_tcb);
4622				}
4623				return (NULL);
4624			}
4625			if ((netp) && (*netp)) {
4626				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4627				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4628			} else {
4629				ret = -1;
4630			}
4631			/*
4632			 * Special case, I must call the output routine to
4633			 * get the cookie echoed
4634			 */
4635			if (abort_no_unlock)
4636				return (NULL);
4637
4638			if ((stcb) && ret == 0)
4639				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4640			*offset = length;
4641			if (locked_tcb) {
4642				SCTP_TCB_UNLOCK(locked_tcb);
4643			}
4644			return (NULL);
4645			break;
4646		case SCTP_SELECTIVE_ACK:
4647			{
4648				struct sctp_sack_chunk *sack;
4649				int abort_now = 0;
4650				uint32_t a_rwnd, cum_ack;
4651				uint16_t num_seg, num_dup;
4652				uint8_t flags;
4653				int offset_seg, offset_dup;
4654
4655				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4656				SCTP_STAT_INCR(sctps_recvsacks);
4657				if (stcb == NULL) {
4658					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
4659					break;
4660				}
4661				if (chk_length < sizeof(struct sctp_sack_chunk)) {
4662					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
4663					break;
4664				}
4665				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4666					/*-
4667					 * If we have sent a shutdown-ack, we will pay no
4668					 * attention to a sack sent in to us since
4669					 * we don't care anymore.
4670					 */
4671					break;
4672				}
4673				sack = (struct sctp_sack_chunk *)ch;
4674				flags = ch->chunk_flags;
4675				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4676				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4677				num_dup = ntohs(sack->sack.num_dup_tsns);
4678				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4679				if (sizeof(struct sctp_sack_chunk) +
4680				    num_seg * sizeof(struct sctp_gap_ack_block) +
4681				    num_dup * sizeof(uint32_t) != chk_length) {
4682					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
4683					break;
4684				}
4685				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
4686				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4687				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4688				    cum_ack, num_seg, a_rwnd);
4689				stcb->asoc.seen_a_sack_this_pkt = 1;
4690				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4691				    (num_seg == 0) &&
4692				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4693				    (stcb->asoc.saw_sack_with_frags == 0) &&
4694				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4695				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4696				    ) {
4697					/*
4698					 * We have a SIMPLE sack having no
4699					 * prior segments and data on sent
4700					 * queue to be acked.. Use the
4701					 * faster path sack processing. We
4702					 * also allow window update sacks
4703					 * with no missing segments to go
4704					 * this way too.
4705					 */
4706					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen);
4707				} else {
4708					if (netp && *netp)
4709						sctp_handle_sack(m, offset_seg, offset_dup,
4710						    stcb, *netp,
4711						    num_seg, 0, num_dup, &abort_now, flags,
4712						    cum_ack, a_rwnd, ecne_seen);
4713				}
4714				if (abort_now) {
4715					/* ABORT signal from sack processing */
4716					*offset = length;
4717					return (NULL);
4718				}
4719				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4720				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4721				    (stcb->asoc.stream_queue_cnt == 0)) {
4722					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4723				}
4724			}
4725			break;
4726			/*
4727			 * EY - nr_sack:  If the received chunk is an
4728			 * nr_sack chunk
4729			 */
4730		case SCTP_NR_SELECTIVE_ACK:
4731			{
4732				struct sctp_nr_sack_chunk *nr_sack;
4733				int abort_now = 0;
4734				uint32_t a_rwnd, cum_ack;
4735				uint16_t num_seg, num_nr_seg, num_dup;
4736				uint8_t flags;
4737				int offset_seg, offset_dup;
4738
4739				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
4740				SCTP_STAT_INCR(sctps_recvsacks);
4741				if (stcb == NULL) {
4742					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
4743					break;
4744				}
4745				if ((stcb->asoc.sctp_nr_sack_on_off == 0) ||
4746				    (stcb->asoc.peer_supports_nr_sack == 0)) {
4747					goto unknown_chunk;
4748				}
4749				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
4750					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
4751					break;
4752				}
4753				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4754					/*-
4755					 * If we have sent a shutdown-ack, we will pay no
4756					 * attention to a sack sent in to us since
4757					 * we don't care anymore.
4758					 */
4759					break;
4760				}
4761				nr_sack = (struct sctp_nr_sack_chunk *)ch;
4762				flags = ch->chunk_flags;
4763				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
4764				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
4765				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
4766				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
4767				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
4768				if (sizeof(struct sctp_nr_sack_chunk) +
4769				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
4770				    num_dup * sizeof(uint32_t) != chk_length) {
4771					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
4772					break;
4773				}
4774				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
4775				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
4776				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4777				    cum_ack, num_seg, a_rwnd);
4778				stcb->asoc.seen_a_sack_this_pkt = 1;
4779				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4780				    (num_seg == 0) && (num_nr_seg == 0) &&
4781				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
4782				    (stcb->asoc.saw_sack_with_frags == 0) &&
4783				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
4784				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
4785					/*
4786					 * We have a SIMPLE sack having no
4787					 * prior segments and data on sent
4788					 * queue to be acked. Use the faster
4789					 * path sack processing. We also
4790					 * allow window update sacks with no
4791					 * missing segments to go this way
4792					 * too.
4793					 */
4794					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
4795					    &abort_now, ecne_seen);
4796				} else {
4797					if (netp && *netp)
4798						sctp_handle_sack(m, offset_seg, offset_dup,
4799						    stcb, *netp,
4800						    num_seg, num_nr_seg, num_dup, &abort_now, flags,
4801						    cum_ack, a_rwnd, ecne_seen);
4802				}
4803				if (abort_now) {
4804					/* ABORT signal from sack processing */
4805					*offset = length;
4806					return (NULL);
4807				}
4808				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
4809				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
4810				    (stcb->asoc.stream_queue_cnt == 0)) {
4811					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
4812				}
4813			}
4814			break;
4815
4816		case SCTP_HEARTBEAT_REQUEST:
4817			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4818			if ((stcb) && netp && *netp) {
4819				SCTP_STAT_INCR(sctps_recvheartbeat);
4820				sctp_send_heartbeat_ack(stcb, m, *offset,
4821				    chk_length, *netp);
4822
4823				/* He's alive so give him credit */
4824				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4825					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4826					    stcb->asoc.overall_error_count,
4827					    0,
4828					    SCTP_FROM_SCTP_INPUT,
4829					    __LINE__);
4830				}
4831				stcb->asoc.overall_error_count = 0;
4832			}
4833			break;
4834		case SCTP_HEARTBEAT_ACK:
4835			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4836			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4837				/* Its not ours */
4838				*offset = length;
4839				if (locked_tcb) {
4840					SCTP_TCB_UNLOCK(locked_tcb);
4841				}
4842				return (NULL);
4843			}
4844			/* He's alive so give him credit */
4845			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4846				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4847				    stcb->asoc.overall_error_count,
4848				    0,
4849				    SCTP_FROM_SCTP_INPUT,
4850				    __LINE__);
4851			}
4852			stcb->asoc.overall_error_count = 0;
4853			SCTP_STAT_INCR(sctps_recvheartbeatack);
4854			if (netp && *netp)
4855				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4856				    stcb, *netp);
4857			break;
4858		case SCTP_ABORT_ASSOCIATION:
4859			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4860			    stcb);
4861			if ((stcb) && netp && *netp)
4862				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4863				    stcb, *netp);
4864			*offset = length;
4865			return (NULL);
4866			break;
4867		case SCTP_SHUTDOWN:
4868			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4869			    stcb);
4870			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4871				*offset = length;
4872				if (locked_tcb) {
4873					SCTP_TCB_UNLOCK(locked_tcb);
4874				}
4875				return (NULL);
4876			}
4877			if (netp && *netp) {
4878				int abort_flag = 0;
4879
4880				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4881				    stcb, *netp, &abort_flag);
4882				if (abort_flag) {
4883					*offset = length;
4884					return (NULL);
4885				}
4886			}
4887			break;
4888		case SCTP_SHUTDOWN_ACK:
4889			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4890			if ((stcb) && (netp) && (*netp))
4891				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4892			*offset = length;
4893			return (NULL);
4894			break;
4895
4896		case SCTP_OPERATION_ERROR:
4897			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4898			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4899
4900				*offset = length;
4901				return (NULL);
4902			}
4903			break;
4904		case SCTP_COOKIE_ECHO:
4905			SCTPDBG(SCTP_DEBUG_INPUT3,
4906			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4907			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4908				;
4909			} else {
4910				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4911					/* We are not interested anymore */
4912			abend:
4913					if (stcb) {
4914						SCTP_TCB_UNLOCK(stcb);
4915					}
4916					*offset = length;
4917					return (NULL);
4918				}
4919			}
4920			/*
4921			 * First are we accepting? We do this again here
4922			 * since it is possible that a previous endpoint WAS
4923			 * listening responded to a INIT-ACK and then
4924			 * closed. We opened and bound.. and are now no
4925			 * longer listening.
4926			 */
4927
4928			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
4929				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4930				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
4931					struct mbuf *oper;
4932					struct sctp_paramhdr *phdr;
4933
4934					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4935					    0, M_DONTWAIT, 1, MT_DATA);
4936					if (oper) {
4937						SCTP_BUF_LEN(oper) =
4938						    sizeof(struct sctp_paramhdr);
4939						phdr = mtod(oper,
4940						    struct sctp_paramhdr *);
4941						phdr->param_type =
4942						    htons(SCTP_CAUSE_OUT_OF_RESC);
4943						phdr->param_length =
4944						    htons(sizeof(struct sctp_paramhdr));
4945					}
4946					sctp_abort_association(inp, stcb, m,
4947					    iphlen, sh, oper, vrf_id, port);
4948				}
4949				*offset = length;
4950				return (NULL);
4951			} else {
4952				struct mbuf *ret_buf;
4953				struct sctp_inpcb *linp;
4954
4955				if (stcb) {
4956					linp = NULL;
4957				} else {
4958					linp = inp;
4959				}
4960
4961				if (linp) {
4962					SCTP_ASOC_CREATE_LOCK(linp);
4963					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4964					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4965						SCTP_ASOC_CREATE_UNLOCK(linp);
4966						goto abend;
4967					}
4968				}
4969				if (netp) {
4970					ret_buf =
4971					    sctp_handle_cookie_echo(m, iphlen,
4972					    *offset, sh,
4973					    (struct sctp_cookie_echo_chunk *)ch,
4974					    &inp, &stcb, netp,
4975					    auth_skipped,
4976					    auth_offset,
4977					    auth_len,
4978					    &locked_tcb,
4979					    vrf_id,
4980					    port);
4981				} else {
4982					ret_buf = NULL;
4983				}
4984				if (linp) {
4985					SCTP_ASOC_CREATE_UNLOCK(linp);
4986				}
4987				if (ret_buf == NULL) {
4988					if (locked_tcb) {
4989						SCTP_TCB_UNLOCK(locked_tcb);
4990					}
4991					SCTPDBG(SCTP_DEBUG_INPUT3,
4992					    "GAK, null buffer\n");
4993					auth_skipped = 0;
4994					*offset = length;
4995					return (NULL);
4996				}
4997				/* if AUTH skipped, see if it verified... */
4998				if (auth_skipped) {
4999					got_auth = 1;
5000					auth_skipped = 0;
5001				}
5002				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
5003					/*
5004					 * Restart the timer if we have
5005					 * pending data
5006					 */
5007					struct sctp_tmit_chunk *chk;
5008
5009					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
5010					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5011				}
5012			}
5013			break;
5014		case SCTP_COOKIE_ACK:
5015			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
5016			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5017				if (locked_tcb) {
5018					SCTP_TCB_UNLOCK(locked_tcb);
5019				}
5020				return (NULL);
5021			}
5022			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5023				/* We are not interested anymore */
5024				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5025					;
5026				} else if (stcb) {
5027#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5028					so = SCTP_INP_SO(inp);
5029					atomic_add_int(&stcb->asoc.refcnt, 1);
5030					SCTP_TCB_UNLOCK(stcb);
5031					SCTP_SOCKET_LOCK(so, 1);
5032					SCTP_TCB_LOCK(stcb);
5033					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5034#endif
5035					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
5036#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5037					SCTP_SOCKET_UNLOCK(so, 1);
5038#endif
5039					*offset = length;
5040					return (NULL);
5041				}
5042			}
5043			/* He's alive so give him credit */
5044			if ((stcb) && netp && *netp) {
5045				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5046					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5047					    stcb->asoc.overall_error_count,
5048					    0,
5049					    SCTP_FROM_SCTP_INPUT,
5050					    __LINE__);
5051				}
5052				stcb->asoc.overall_error_count = 0;
5053				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5054			}
5055			break;
5056		case SCTP_ECN_ECHO:
5057			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
5058			/* He's alive so give him credit */
5059			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5060				/* Its not ours */
5061				if (locked_tcb) {
5062					SCTP_TCB_UNLOCK(locked_tcb);
5063				}
5064				*offset = length;
5065				return (NULL);
5066			}
5067			if (stcb) {
5068				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5069					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5070					    stcb->asoc.overall_error_count,
5071					    0,
5072					    SCTP_FROM_SCTP_INPUT,
5073					    __LINE__);
5074				}
5075				stcb->asoc.overall_error_count = 0;
5076				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5077				    stcb);
5078				ecne_seen = 1;
5079			}
5080			break;
5081		case SCTP_ECN_CWR:
5082			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5083			/* He's alive so give him credit */
5084			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5085				/* Its not ours */
5086				if (locked_tcb) {
5087					SCTP_TCB_UNLOCK(locked_tcb);
5088				}
5089				*offset = length;
5090				return (NULL);
5091			}
5092			if (stcb) {
5093				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5094					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5095					    stcb->asoc.overall_error_count,
5096					    0,
5097					    SCTP_FROM_SCTP_INPUT,
5098					    __LINE__);
5099				}
5100				stcb->asoc.overall_error_count = 0;
5101				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5102			}
5103			break;
5104		case SCTP_SHUTDOWN_COMPLETE:
5105			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
5106			/* must be first and only chunk */
5107			if ((num_chunks > 1) ||
5108			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5109				*offset = length;
5110				if (locked_tcb) {
5111					SCTP_TCB_UNLOCK(locked_tcb);
5112				}
5113				return (NULL);
5114			}
5115			if ((stcb) && netp && *netp) {
5116				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5117				    stcb, *netp);
5118			}
5119			*offset = length;
5120			return (NULL);
5121			break;
5122		case SCTP_ASCONF:
5123			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5124			/* He's alive so give him credit */
5125			if (stcb) {
5126				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5127					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5128					    stcb->asoc.overall_error_count,
5129					    0,
5130					    SCTP_FROM_SCTP_INPUT,
5131					    __LINE__);
5132				}
5133				stcb->asoc.overall_error_count = 0;
5134				sctp_handle_asconf(m, *offset,
5135				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5136				asconf_cnt++;
5137			}
5138			break;
5139		case SCTP_ASCONF_ACK:
5140			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5141			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5142				/* Its not ours */
5143				if (locked_tcb) {
5144					SCTP_TCB_UNLOCK(locked_tcb);
5145				}
5146				*offset = length;
5147				return (NULL);
5148			}
5149			if ((stcb) && netp && *netp) {
5150				/* He's alive so give him credit */
5151				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5152					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5153					    stcb->asoc.overall_error_count,
5154					    0,
5155					    SCTP_FROM_SCTP_INPUT,
5156					    __LINE__);
5157				}
5158				stcb->asoc.overall_error_count = 0;
5159				sctp_handle_asconf_ack(m, *offset,
5160				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5161				if (abort_no_unlock)
5162					return (NULL);
5163			}
5164			break;
5165		case SCTP_FORWARD_CUM_TSN:
5166			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5167			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5168				/* Its not ours */
5169				if (locked_tcb) {
5170					SCTP_TCB_UNLOCK(locked_tcb);
5171				}
5172				*offset = length;
5173				return (NULL);
5174			}
5175			/* He's alive so give him credit */
5176			if (stcb) {
5177				int abort_flag = 0;
5178
5179				stcb->asoc.overall_error_count = 0;
5180				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5181					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5182					    stcb->asoc.overall_error_count,
5183					    0,
5184					    SCTP_FROM_SCTP_INPUT,
5185					    __LINE__);
5186				}
5187				*fwd_tsn_seen = 1;
5188				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5189					/* We are not interested anymore */
5190#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5191					so = SCTP_INP_SO(inp);
5192					atomic_add_int(&stcb->asoc.refcnt, 1);
5193					SCTP_TCB_UNLOCK(stcb);
5194					SCTP_SOCKET_LOCK(so, 1);
5195					SCTP_TCB_LOCK(stcb);
5196					atomic_subtract_int(&stcb->asoc.refcnt, 1);
5197#endif
5198					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5199#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5200					SCTP_SOCKET_UNLOCK(so, 1);
5201#endif
5202					*offset = length;
5203					return (NULL);
5204				}
5205				sctp_handle_forward_tsn(stcb,
5206				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5207				if (abort_flag) {
5208					*offset = length;
5209					return (NULL);
5210				} else {
5211					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5212						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5213						    stcb->asoc.overall_error_count,
5214						    0,
5215						    SCTP_FROM_SCTP_INPUT,
5216						    __LINE__);
5217					}
5218					stcb->asoc.overall_error_count = 0;
5219				}
5220
5221			}
5222			break;
5223		case SCTP_STREAM_RESET:
5224			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5225			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5226				/* Its not ours */
5227				if (locked_tcb) {
5228					SCTP_TCB_UNLOCK(locked_tcb);
5229				}
5230				*offset = length;
5231				return (NULL);
5232			}
5233			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5234				/* We are not interested anymore */
5235#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5236				so = SCTP_INP_SO(inp);
5237				atomic_add_int(&stcb->asoc.refcnt, 1);
5238				SCTP_TCB_UNLOCK(stcb);
5239				SCTP_SOCKET_LOCK(so, 1);
5240				SCTP_TCB_LOCK(stcb);
5241				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5242#endif
5243				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5244#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5245				SCTP_SOCKET_UNLOCK(so, 1);
5246#endif
5247				*offset = length;
5248				return (NULL);
5249			}
5250			if (stcb->asoc.peer_supports_strreset == 0) {
5251				/*
5252				 * hmm, peer should have announced this, but
5253				 * we will turn it on since he is sending us
5254				 * a stream reset.
5255				 */
5256				stcb->asoc.peer_supports_strreset = 1;
5257			}
5258			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
5259				/* stop processing */
5260				*offset = length;
5261				return (NULL);
5262			}
5263			break;
5264		case SCTP_PACKET_DROPPED:
5265			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5266			/* re-get it all please */
5267			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5268				/* Its not ours */
5269				if (locked_tcb) {
5270					SCTP_TCB_UNLOCK(locked_tcb);
5271				}
5272				*offset = length;
5273				return (NULL);
5274			}
5275			if (ch && (stcb) && netp && (*netp)) {
5276				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5277				    stcb, *netp,
5278				    min(chk_length, (sizeof(chunk_buf) - 4)));
5279
5280			}
5281			break;
5282
5283		case SCTP_AUTHENTICATION:
5284			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5285			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
5286				goto unknown_chunk;
5287
5288			if (stcb == NULL) {
5289				/* save the first AUTH for later processing */
5290				if (auth_skipped == 0) {
5291					auth_offset = *offset;
5292					auth_len = chk_length;
5293					auth_skipped = 1;
5294				}
5295				/* skip this chunk (temporarily) */
5296				goto next_chunk;
5297			}
5298			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5299			    (chk_length > (sizeof(struct sctp_auth_chunk) +
5300			    SCTP_AUTH_DIGEST_LEN_MAX))) {
5301				/* Its not ours */
5302				if (locked_tcb) {
5303					SCTP_TCB_UNLOCK(locked_tcb);
5304				}
5305				*offset = length;
5306				return (NULL);
5307			}
5308			if (got_auth == 1) {
5309				/* skip this chunk... it's already auth'd */
5310				goto next_chunk;
5311			}
5312			got_auth = 1;
5313			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5314			    m, *offset)) {
5315				/* auth HMAC failed so dump the packet */
5316				*offset = length;
5317				return (stcb);
5318			} else {
5319				/* remaining chunks are HMAC checked */
5320				stcb->asoc.authenticated = 1;
5321			}
5322			break;
5323
5324		default:
5325	unknown_chunk:
5326			/* it's an unknown chunk! */
5327			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5328				struct mbuf *mm;
5329				struct sctp_paramhdr *phd;
5330
5331				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
5332				    0, M_DONTWAIT, 1, MT_DATA);
5333				if (mm) {
5334					phd = mtod(mm, struct sctp_paramhdr *);
5335					/*
5336					 * We cheat and use param type since
5337					 * we did not bother to define a
5338					 * error cause struct. They are the
5339					 * same basic format with different
5340					 * names.
5341					 */
5342					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5343					phd->param_length = htons(chk_length + sizeof(*phd));
5344					SCTP_BUF_LEN(mm) = sizeof(*phd);
5345					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
5346					    M_DONTWAIT);
5347					if (SCTP_BUF_NEXT(mm)) {
5348#ifdef SCTP_MBUF_LOGGING
5349						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5350							struct mbuf *mat;
5351
5352							mat = SCTP_BUF_NEXT(mm);
5353							while (mat) {
5354								if (SCTP_BUF_IS_EXTENDED(mat)) {
5355									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5356								}
5357								mat = SCTP_BUF_NEXT(mat);
5358							}
5359						}
5360#endif
5361						sctp_queue_op_err(stcb, mm);
5362					} else {
5363						sctp_m_freem(mm);
5364					}
5365				}
5366			}
5367			if ((ch->chunk_type & 0x80) == 0) {
5368				/* discard this packet */
5369				*offset = length;
5370				return (stcb);
5371			}	/* else skip this bad chunk and continue... */
5372			break;
5373		}		/* switch (ch->chunk_type) */
5374
5375
5376next_chunk:
5377		/* get the next chunk */
5378		*offset += SCTP_SIZE32(chk_length);
5379		if (*offset >= length) {
5380			/* no more data left in the mbuf chain */
5381			break;
5382		}
5383		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5384		    sizeof(struct sctp_chunkhdr), chunk_buf);
5385		if (ch == NULL) {
5386			if (locked_tcb) {
5387				SCTP_TCB_UNLOCK(locked_tcb);
5388			}
5389			*offset = length;
5390			return (NULL);
5391		}
5392	}			/* while */
5393
5394	if (asconf_cnt > 0 && stcb != NULL) {
5395		sctp_send_asconf_ack(stcb);
5396	}
5397	return (stcb);
5398}
5399
5400
#ifdef INVARIANTS
#ifdef __GNUC__
__attribute__((noinline))
#endif
void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb_it;

	/*
	 * Debug-build sanity check run when input processing returns: no
	 * stcb on this endpoint's association list may still have its lock
	 * held, and neither the endpoint's create lock nor its inp lock may
	 * be owned by the current thread.
	 */
	for (stcb_it = LIST_FIRST(&inp->sctp_asoc_list); stcb_it != NULL;
	    stcb_it = LIST_NEXT(stcb_it, sctp_tcblist)) {
		if (mtx_owned(&stcb_it->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
	if (mtx_owned(&inp->inp_create_mtx)) {
		panic("Own create lock on inp");
	}
	if (mtx_owned(&inp->inp_mtx)) {
		panic("Own inp lock on inp");
	}
}

#endif
5424
5425/*
5426 * common input chunk processing (v4 and v6)
5427 */
/*
 * Common input chunk processing shared by the IPv4 and IPv6 entry points.
 *
 * On entry: *mm is the packet mbuf chain, offset points just past the first
 * chunk header (ch), length is the total packet length, and stcb (if
 * non-NULL) is passed in LOCKED.  On return the stcb lock has been released
 * on every path (including abort paths, where the association teardown
 * itself drops it).  ecn_bits carries the IP TOS/traffic-class bits; port
 * is the encapsulating UDP source port (0 if not UDP-tunneled).
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;
	int cnt_ctrl_ready = 0;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	    m, iphlen, offset, length, stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		/*
		 * May change stcb (e.g. a COOKIE-ECHO creating or switching
		 * the association); offset is advanced past all control
		 * chunks consumed.  Returns NULL if the packet was fully
		 * handled or must be dropped.
		 */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
			if ((net) && (port)) {
				/*
				 * First time we learn this path is UDP
				 * encapsulated: shrink the path MTU by the
				 * UDP header we must prepend.
				 */
				if (net->port == 0) {
					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
				}
				net->port = port;
			}
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out_now;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only.  Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		/*
		 * Skip the DATA but still give queued chunks a chance to
		 * go out (stcb is still locked here).
		 */
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			goto out_now;
		}
		data_processed = 1;
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	/* take care of ecn */
	if (stcb->asoc.ecn_allowed && ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
		/* Yep, we need to add a ECNE */
		sctp_send_ecn_echo(stcb, net, high_tsn);
	}
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		/*
		 * A FORWARD-TSN arrived with no DATA processed: decide
		 * whether to SACK now based on whether a gap existed.
		 */
		int was_a_gap;
		uint32_t highest_tsn;

		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
		} else {
			highest_tsn = stcb->asoc.highest_tsn_inside_map;
		}
		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			goto out_now;
		}
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* ECN-echoes on the queue don't count as "ready" control. */
		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
	}
	/*
	 * Send if control chunks are pending, or if there is unsent data
	 * and either the peer has window, or we are at zero-window with
	 * nothing in flight (zero-window probe).
	 */
	if (cnt_ctrl_ready ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
out_now:
#ifdef INVARIANTS
	sctp_validate_no_locks(inp);
#endif
	return;
}
5674
#if 0
/*
 * Debug helper (compiled out): dump each mbuf in a chain with its length
 * and, for cluster/external mbufs, the external buffer size.
 *
 * Fixed: SCTP_BUF_LEN() yields the mbuf's m_len, an int, but the old
 * format used %ld — a type mismatch that is undefined behavior on LP64
 * platforms.  Also cast the pointers to void * as %p requires.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	for (; m; m = SCTP_BUF_NEXT(m)) {
		printf("%p: m_len = %d\n", (void *)m, SCTP_BUF_LEN(m));
		if (SCTP_BUF_IS_EXTENDED(m))
			printf("%p: extend_size = %d\n", (void *)m, SCTP_BUF_EXTEND_SIZE(m));
	}
}

#endif
5687
/*
 * IPv4 packet entry point for SCTP.  Validates the header chain, checks the
 * CRC32c checksum (unless offloaded or compiled out), looks up the endpoint
 * and association, and hands the packet to the common processing path.
 * Consumes the mbuf chain on every path.  port is the source port of an
 * encapsulating UDP header (0 when not UDP-tunneled).
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	/* set when the assoc lookup bumped inp's refcount without a stcb */
	int refcount_up = 0;
	int length, mlen, offset;

#if !defined(SCTP_WITH_NO_CSUM)
	uint32_t check, calc_check;

#endif

	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef  SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == 0) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		/* m_pullup may have replaced the head mbuf */
		ip = mtod(m, struct ip *);
	}
	/* validate mbuf chain length with IP payload length */
	if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    m->m_pkthdr.csum_flags);
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		/* NIC verified the CRC32c for us */
		SCTP_STAT_INCR(sctps_recvhwcrc);
		goto sctp_skip_csum_4;
	}
	check = sh->checksum;	/* save incoming checksum */
	sh->checksum = 0;	/* prepare for calc */
	calc_check = sctp_calculate_cksum(m, iphlen);
	sh->checksum = check;
	SCTP_STAT_INCR(sctps_recvswcrc);
	if (calc_check != check) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
		    calc_check, check, m, mlen, iphlen);

		/*
		 * Bad checksum: still locate the association so we can
		 * report a packet-dropped to the peer (if one exists).
		 */
		stcb = sctp_findassociation_addr(m, iphlen,
		    offset - sizeof(*ch),
		    sh, ch, &inp, &net,
		    vrf_id);
		if ((net) && (port)) {
			if (net->port == 0) {
				sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
			}
			net->port = port;
		}
		if ((inp) && (stcb)) {
			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
		} else if ((inp != NULL) && (stcb == NULL)) {
			/* lookup raised inp's refcount; drop it at "bad" */
			refcount_up = 1;
		}
		SCTP_STAT_INCR(sctps_badsum);
		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
		goto bad;
	}
sctp_skip_csum_4:
#endif
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	if ((net) && (port)) {
		/* learned this path is UDP-encapsulated: shrink PMTU once */
		if (net->port == 0) {
			sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
		}
		net->port = port;
	}
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		/* No endpoint listening: respond per RFC 4960 OOTB rules. */
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
		goto bad;
	} else if (stcb == NULL) {
		/* endpoint found but no assoc: must drop inp ref on exit */
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		MODULE_GLOBAL(ipsec4stat).in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	/*
	 * NOTE(review): assumes ip_input already converted ip_len to host
	 * order and subtracted the IP header, so adding iphlen back yields
	 * the total packet length -- confirm against this kernel version.
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id, port);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
bad:
	/* Error exit: release stcb lock, inp ref, and the mbuf chain. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
5928
5929
/*
 * Protocol-switch entry point (non-UDP-encapsulated, port 0).  With
 * multi-core input enabled, packets are dispatched to a per-CPU input
 * thread keyed on the verification tag, so all packets of one association
 * stay on one CPU and remain ordered.
 */
void
sctp_input(struct mbuf *m, int off)
{
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	struct ip *ip;
	struct sctphdr *sh;
	int offset;
	int cpu_to_use;

	if (mp_maxid > 1) {
		ip = mtod(m, struct ip *);
		/* need the common header contiguous to read v_tag */
		offset = off + sizeof(*sh);
		if (SCTP_BUF_LEN(m) < offset) {
			if ((m = m_pullup(m, offset)) == 0) {
				SCTP_STAT_INCR(sctps_hdrops);
				return;
			}
			ip = mtod(m, struct ip *);
		}
		sh = (struct sctphdr *)((caddr_t)ip + off);
		/*
		 * NOTE(review): modulo uses mp_maxid (highest CPU id), not
		 * the CPU count -- verify CPU ids are dense / this is the
		 * intended spread.
		 */
		cpu_to_use = ntohl(sh->v_tag) % mp_maxid;
		sctp_queue_to_mcore(m, off, cpu_to_use);
		return;
	}
#endif
	sctp_input_with_port(m, off, 0);
}
5957