sctp_input.c revision 172396
1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 172396 2007-10-01 03:22:29Z rrs $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_var.h>
38#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctp_header.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_input.h>
44#include <netinet/sctp_auth.h>
45#include <netinet/sctp_indata.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_bsd_addr.h>
48#include <netinet/sctp_timer.h>
49
50
51
52static void
53sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
54{
55	struct sctp_nets *net;
56
57	/*
58	 * This now stops not only all cookie timers but also any INIT
59	 * timers. This makes sure that the timers are stopped in all
60	 * collision cases.
61	 */
62	SCTP_TCB_LOCK_ASSERT(stcb);
63	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
64		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
65			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
66			    stcb->sctp_ep,
67			    stcb,
68			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
69		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
70			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
71			    stcb->sctp_ep,
72			    stcb,
73			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
74		}
75	}
76}
77
78/* INIT handler */
79static void
80sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
81    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
82    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
83{
84	struct sctp_init *init;
85	struct mbuf *op_err;
86	uint32_t init_limit;
87
88	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
89	    stcb);
90	if (stcb == NULL) {
91		SCTP_INP_RLOCK(inp);
92		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
93			goto outnow;
94		}
95	}
96	op_err = NULL;
97	init = &cp->init;
98	/* First are we accepting? */
99	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
100		SCTPDBG(SCTP_DEBUG_INPUT2,
101		    "sctp_handle_init: Abort, so_qlimit:%d\n",
102		    inp->sctp_socket->so_qlimit);
103		/*
104		 * FIX ME ?? What about the TCP model when we have a
105		 * match/restart case?
106		 */
107		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
108		    vrf_id);
109		if (stcb)
110			*abort_no_unlock = 1;
111		goto outnow;
112	}
113	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
114		/* Invalid length */
115		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
116		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
117		    vrf_id);
118		if (stcb)
119			*abort_no_unlock = 1;
120		goto outnow;
121	}
122	/* validate parameters */
123	if (init->initiate_tag == 0) {
124		/* protocol error... send abort */
125		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
126		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
127		    vrf_id);
128		if (stcb)
129			*abort_no_unlock = 1;
130		goto outnow;
131	}
132	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
133		/* invalid parameter... send abort */
134		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
135		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
136		    vrf_id);
137		if (stcb)
138			*abort_no_unlock = 1;
139		goto outnow;
140	}
141	if (init->num_inbound_streams == 0) {
142		/* protocol error... send abort */
143		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
144		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
145		    vrf_id);
146		if (stcb)
147			*abort_no_unlock = 1;
148		goto outnow;
149	}
150	if (init->num_outbound_streams == 0) {
151		/* protocol error... send abort */
152		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
153		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
154		    vrf_id);
155		if (stcb)
156			*abort_no_unlock = 1;
157		goto outnow;
158	}
159	init_limit = offset + ntohs(cp->ch.chunk_length);
160	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
161	    init_limit)) {
162		/* auth parameter(s) error... send abort */
163		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
164		if (stcb)
165			*abort_no_unlock = 1;
166		goto outnow;
167	}
168	/* send an INIT-ACK w/cookie */
169	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
170	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id,
171	    SCTP_HOLDS_LOCK);
172outnow:
173	if (stcb == NULL) {
174		SCTP_INP_RUNLOCK(inp);
175	}
176}
177
178/*
179 * process peer "INIT/INIT-ACK" chunk; returns value < 0 on error
180 */
181
182int
183sctp_is_there_unsent_data(struct sctp_tcb *stcb)
184{
185	int unsent_data = 0;
186	struct sctp_stream_queue_pending *sp;
187	struct sctp_stream_out *strq;
188	struct sctp_association *asoc;
189
190	/*
191	 * This function returns the number of streams that have true unsent
192	 * data on them. Note that as it looks through, it will clean up any
193	 * places that have old data that has been sent but was left at the
194	 * top of the stream queue.
195	 */
196	asoc = &stcb->asoc;
197	SCTP_TCB_SEND_LOCK(stcb);
198	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
199		/* Check to see if some data queued */
200		TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
201	is_there_another:
202			/* sa_ignore FREED_MEMORY */
203			sp = TAILQ_FIRST(&strq->outqueue);
204			if (sp == NULL) {
205				continue;
206			}
207			if ((sp->msg_is_complete) &&
208			    (sp->length == 0) &&
209			    (sp->sender_all_done)) {
210				/*
211				 * We are doing deferred cleanup. Last time
212				 * through when we took all the data the
213				 * sender_all_done was not set.
214				 */
215				if (sp->put_last_out == 0) {
216					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
217					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
218					    sp->sender_all_done,
219					    sp->length,
220					    sp->msg_is_complete,
221					    sp->put_last_out);
222				}
223				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
224				TAILQ_REMOVE(&strq->outqueue, sp, next);
225				sctp_free_remote_addr(sp->net);
226				if (sp->data) {
227					sctp_m_freem(sp->data);
228					sp->data = NULL;
229				}
230				sctp_free_a_strmoq(stcb, sp);
231				goto is_there_another;
232			} else {
233				unsent_data++;
234				continue;
235			}
236		}
237	}
238	SCTP_TCB_SEND_UNLOCK(stcb);
239	return (unsent_data);
240}
241
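/*
 * Common INIT/INIT-ACK parameter processing: save the peer's vtag and
 * a_rwnd, seed each net's ssthresh, trim our outbound stream count to what
 * the peer will accept (freeing anything queued on abandoned streams),
 * initialize the TSN/mapping state and (re)allocate the inbound stream
 * array sized to the peer's outbound streams. Returns -1 only if the
 * inbound stream array cannot be allocated, 0 otherwise.
 */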
242static int
243sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
244    struct sctp_nets *net)
245{
246	struct sctp_init *init;
247	struct sctp_association *asoc;
248	struct sctp_nets *lnet;
249	unsigned int i;
250
251	init = &cp->init;
252	asoc = &stcb->asoc;
253	/* save off parameters */
254	asoc->peer_vtag = ntohl(init->initiate_tag);
255	asoc->peers_rwnd = ntohl(init->a_rwnd);
256	if (TAILQ_FIRST(&asoc->nets)) {
257		/* update any ssthresh's that may have a default */
258		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
259			lnet->ssthresh = asoc->peers_rwnd;
260
261			if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
262				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
263			}
264		}
265	}
266	SCTP_TCB_SEND_LOCK(stcb);
267	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
268		unsigned int newcnt;
269		struct sctp_stream_out *outs;
270		struct sctp_stream_queue_pending *sp;
271
272		/* cut back on number of streams */
273		newcnt = ntohs(init->num_inbound_streams);
274		/* This if is probably not needed but I am cautious */
275		if (asoc->strmout) {
276			/* First make sure no data chunks are trapped */
277			for (i = newcnt; i < asoc->pre_open_streams; i++) {
278				outs = &asoc->strmout[i];
279				sp = TAILQ_FIRST(&outs->outqueue);
280				while (sp) {
281					TAILQ_REMOVE(&outs->outqueue, sp,
282					    next);
283					asoc->stream_queue_cnt--;
284					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
285					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
286					    sp, SCTP_SO_NOT_LOCKED);
287					if (sp->data) {
288						sctp_m_freem(sp->data);
289						sp->data = NULL;
290					}
291					sctp_free_remote_addr(sp->net);
292					sp->net = NULL;
293					/* Free the chunk */
294					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
295					    sp, stcb);
296
297					sctp_free_a_strmoq(stcb, sp);
298					/* sa_ignore FREED_MEMORY */
299					sp = TAILQ_FIRST(&outs->outqueue);
300				}
301			}
302		}
303		/* cut back the count and abandon the upper streams */
304		asoc->pre_open_streams = newcnt;
305	}
306	SCTP_TCB_SEND_UNLOCK(stcb);
307	asoc->streamoutcnt = asoc->pre_open_streams;
308	/* init tsn's */
309	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
310	if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
311		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
312	}
313	/* This is the next one we expect */
314	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
315
316	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
317	asoc->cumulative_tsn = asoc->asconf_seq_in;
318	asoc->last_echo_tsn = asoc->asconf_seq_in;
319	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
320	/* open the requested streams */
321
322	if (asoc->strmin != NULL) {
323		/* Free the old ones */
324		struct sctp_queued_to_read *ctl;
325
326		for (i = 0; i < asoc->streamincnt; i++) {
327			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
328			while (ctl) {
329				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
330				sctp_free_remote_addr(ctl->whoFrom);
331				ctl->whoFrom = NULL;
332				sctp_m_freem(ctl->data);
333				ctl->data = NULL;
334				sctp_free_a_readq(stcb, ctl);
335				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
336			}
337		}
338		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
339	}
340	asoc->streamincnt = ntohs(init->num_outbound_streams);
341	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
342		asoc->streamincnt = MAX_SCTP_STREAMS;
343	}
344	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
345	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
346	if (asoc->strmin == NULL) {
347		/* we didn't get memory for the streams! */
348		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
349		return (-1);
350	}
351	for (i = 0; i < asoc->streamincnt; i++) {
352		asoc->strmin[i].stream_no = i;
353		asoc->strmin[i].last_sequence_delivered = 0xffff;
354		/*
355		 * U-stream ranges will be set when the cookie is unpacked,
356		 * or, for the INIT sender, they are unset (if PR-SCTP is not
357		 * supported) when the INIT-ACK arrives.
358		 */
359		TAILQ_INIT(&asoc->strmin[i].inqueue);
360		asoc->strmin[i].delivery_started = 0;
361	}
362	/*
363	 * sctp_load_addresses_from_init() will put the addresses into the
364	 * association when the COOKIE is processed or the INIT-ACK is
365	 * processed. Both types of COOKIEs, existing and new, call this
366	 * routine. It will remove addresses that are no longer in the
367	 * association (for the restarting case where addresses are
368	 * removed). Up front, when the INIT arrives, we will discard it if
369	 * it is a restart and new addresses have been added.
370	 */
371	/* sa_ignore MEMLEAK */
372	return (0);
373}
374
375/*
376 * INIT-ACK message processing/consumption; returns value < 0 on error
377 */
378static int
379sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
380    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
381    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
382{
383	struct sctp_association *asoc;
384	struct mbuf *op_err;
385	int retval, abort_flag;
386	uint32_t initack_limit;
387
388	/* First verify that we have no illegal param's */
389	abort_flag = 0;
390	op_err = NULL;
391
392	op_err = sctp_arethere_unrecognized_parameters(m,
393	    (offset + sizeof(struct sctp_init_chunk)),
394	    &abort_flag, (struct sctp_chunkhdr *)cp);
395	if (abort_flag) {
396		/* Send an abort and notify peer */
397		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
398		*abort_no_unlock = 1;
399		return (-1);
400	}
401	asoc = &stcb->asoc;
402	/* process the peer's parameters in the INIT-ACK */
403	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
404	if (retval < 0) {
405		return (retval);
406	}
407	initack_limit = offset + ntohs(cp->ch.chunk_length);
408	/* load all addresses */
409	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
410	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
411	    NULL))) {
412		/* Huh, we should abort */
413		SCTPDBG(SCTP_DEBUG_INPUT1,
414		    "Load addresses from INIT causes an abort %d\n",
415		    retval);
416		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
417		    NULL, 0);
418		*abort_no_unlock = 1;
419		return (-1);
420	}
421	/* if the peer doesn't support asconf, flush the asconf queue */
422	if (asoc->peer_supports_asconf == 0) {
423		struct sctp_asconf_addr *aparam;
424
425		while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
426			/* sa_ignore FREED_MEMORY */
427			aparam = TAILQ_FIRST(&asoc->asconf_queue);
428			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
429			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
430		}
431	}
432	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
433	    stcb->asoc.local_hmacs);
434	if (op_err) {
435		sctp_queue_op_err(stcb, op_err);
436		/* queuing will steal away the mbuf chain to the out queue */
437		op_err = NULL;
438	}
439	/* extract the cookie and queue it to "echo" it back... */
440	if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
441		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
442		    stcb->asoc.overall_error_count,
443		    0,
444		    SCTP_FROM_SCTP_INPUT,
445		    __LINE__);
446	}
447	stcb->asoc.overall_error_count = 0;
448	net->error_count = 0;
449
450	/*
451	 * Cancel the INIT timer. We do this first, before queueing the
452	 * cookie. We always cancel at the primary to assure that we are
453	 * canceling the timer started by the INIT, which always goes to the
454	 * primary.
455	 */
456	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
457	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
458
459	/* calculate the RTO */
460	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);
461
462	retval = sctp_send_cookie_echo(m, offset, stcb, net);
463	if (retval < 0) {
464		/*
465		 * No cookie, we probably should send an op error. But in any
466		 * case, if there is no cookie in the INIT-ACK, we can
467		 * abandon the peer; it's broken.
468		 */
469		if (retval == -3) {
470			/* We abort with an error of missing mandatory param */
471			op_err =
472			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
473			if (op_err) {
474				/*
475				 * Expand beyond to include the mandatory
476				 * param cookie
477				 */
478				struct sctp_inv_mandatory_param *mp;
479
480				SCTP_BUF_LEN(op_err) =
481				    sizeof(struct sctp_inv_mandatory_param);
482				mp = mtod(op_err,
483				    struct sctp_inv_mandatory_param *);
484				/* Subtract the reserved param */
485				mp->length =
486				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
487				mp->num_param = htonl(1);
488				mp->param = htons(SCTP_STATE_COOKIE);
489				mp->resv = 0;
490			}
491			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
492			    sh, op_err, 0);
493			*abort_no_unlock = 1;
494		}
495		return (retval);
496	}
497	return (0);
498}
499
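/*
 * HEARTBEAT-ACK handler: recover the destination address echoed in the
 * heartbeat info, confirm that destination if the random values match,
 * clear its error count, take it out of the unreachable/PF states as
 * needed, and feed the echoed timestamp into the RTO calculation. If the
 * ack confirms a requested primary, the mobility (deleted primary)
 * handling is completed as well.
 */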
500static void
501sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
502    struct sctp_tcb *stcb, struct sctp_nets *net)
503{
504	struct sockaddr_storage store;
505	struct sockaddr_in *sin;
506	struct sockaddr_in6 *sin6;
507	struct sctp_nets *r_net;
508	struct timeval tv;
509	int req_prim = 0;
510
511	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
512		/* Invalid length */
513		return;
514	}
515	sin = (struct sockaddr_in *)&store;
516	sin6 = (struct sockaddr_in6 *)&store;
517
518	memset(&store, 0, sizeof(store));
519	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
520	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
521		sin->sin_family = cp->heartbeat.hb_info.addr_family;
522		sin->sin_len = cp->heartbeat.hb_info.addr_len;
523		sin->sin_port = stcb->rport;
524		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
525		    sizeof(sin->sin_addr));
526	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
527	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
528		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
529		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
530		sin6->sin6_port = stcb->rport;
531		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
532		    sizeof(sin6->sin6_addr));
533	} else {
534		return;
535	}
536	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
537	if (r_net == NULL) {
538		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
539		return;
540	}
541	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
542	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
543	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
544		/*
545		 * If it's an HB and its random value is correct, we can
546		 * confirm the destination.
547		 */
548		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
549		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
550			stcb->asoc.primary_destination = r_net;
551			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
552			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
553			r_net = TAILQ_FIRST(&stcb->asoc.nets);
554			if (r_net != stcb->asoc.primary_destination) {
555				/*
556				 * The first one on the list is NOT the primary.
557				 * sctp_cmpaddr() is much more efficient if
558				 * the primary is the first on the list, so
559				 * make it so.
560				 */
561				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
562				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
563			}
564			req_prim = 1;
565		}
566		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
567		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
568	}
569	r_net->error_count = 0;
570	r_net->hb_responded = 1;
571	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
572	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
573	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
574		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
575		r_net->dest_state |= SCTP_ADDR_REACHABLE;
576		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
577		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
578		/* now was it the primary? if so restore */
579		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
580			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
581		}
582	}
583	/*
584	 * JRS 5/14/07 - If CMT PF is on and the destination is in the PF
585	 * state, set the destination to the active state and set the cwnd to
586	 * one or two MTUs based on whether PF1 or PF2 is being used. If a T3
587	 * timer is running for the destination, stop the timer because a
588	 * PF-heartbeat was received.
589	 */
590	if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) ==
591	    SCTP_ADDR_PF) {
592		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
593			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
594			    stcb, net,
595			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
596		}
597		net->dest_state &= ~SCTP_ADDR_PF;
598		net->cwnd = net->mtu * sctp_cmt_pf;
599		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
600		    net, net->cwnd);
601	}
602	/* Now lets do a RTO with this */
603	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
604	/* Mobility adaptation */
605	if (req_prim) {
606		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
607		    SCTP_MOBILITY_BASE) ||
608		    sctp_is_mobility_feature_on(stcb->sctp_ep,
609		    SCTP_MOBILITY_FASTHANDOFF)) &&
610		    sctp_is_mobility_feature_on(stcb->sctp_ep,
611		    SCTP_MOBILITY_PRIM_DELETED)) {
612
613			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
614			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
615			    SCTP_MOBILITY_FASTHANDOFF)) {
616				sctp_assoc_immediate_retrans(stcb,
617				    stcb->asoc.primary_destination);
618			}
619			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
620			    SCTP_MOBILITY_BASE)) {
621				sctp_move_chunks_from_deleted_prim(stcb,
622				    stcb->asoc.primary_destination);
623			}
624			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
625			    stcb->asoc.deleted_primary);
626		}
627	}
628}
629
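/*
 * ABORT handler: stop the receive timer, notify the ULP that the
 * association was aborted, and free the TCB.
 */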
630static void
631sctp_handle_abort(struct sctp_abort_chunk *cp,
632    struct sctp_tcb *stcb, struct sctp_nets *net)
633{
634#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
635	struct socket *so;
636
637#endif
638
639	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
640	if (stcb == NULL)
641		return;
642
643	/* stop any receive timers */
644	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
645	/* notify user of the abort and clean up... */
646	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
647	/* free the tcb */
648#if defined(SCTP_PANIC_ON_ABORT)
649	printf("stcb:%p state:%d rport:%d net:%p\n",
650	    stcb, stcb->asoc.state, stcb->rport, net);
651	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
652		panic("Received an ABORT");
653	} else {
654		printf("No panic its in state %x closed\n", stcb->asoc.state);
655	}
656#endif
657	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
658	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
659	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
660		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
661	}
662#ifdef SCTP_ASOCLOG_OF_TSNS
663	sctp_print_out_track_log(stcb);
664#endif
665#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
666	so = SCTP_INP_SO(stcb->sctp_ep);
667	atomic_add_int(&stcb->asoc.refcnt, 1);
668	SCTP_TCB_UNLOCK(stcb);
669	SCTP_SOCKET_LOCK(so, 1);
670	SCTP_TCB_LOCK(stcb);
671	atomic_subtract_int(&stcb->asoc.refcnt, 1);
672#endif
673	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
674	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
675	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
676#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
677	SCTP_SOCKET_UNLOCK(so, 1);
678#endif
679	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
680}
681
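/*
 * SHUTDOWN handler: update the cumulative ack, terminate any partial
 * delivery in progress, move to SHUTDOWN-RECEIVED, and once nothing is
 * left to send, reply with a SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.
 */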
682static void
683sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
684    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
685{
686	struct sctp_association *asoc;
687	int some_on_streamwheel;
688
689#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
690	struct socket *so;
691
692#endif
693
694	SCTPDBG(SCTP_DEBUG_INPUT2,
695	    "sctp_handle_shutdown: handling SHUTDOWN\n");
696	if (stcb == NULL)
697		return;
698	asoc = &stcb->asoc;
699	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
700	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
701		return;
702	}
703	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
704		/* Shutdown NOT the expected size */
705		return;
706	} else {
707		sctp_update_acked(stcb, cp, net, abort_flag);
708	}
709	if (asoc->control_pdapi) {
710		/*
711		 * With a normal shutdown we assume the end of the last record.
712		 */
713		SCTP_INP_READ_LOCK(stcb->sctp_ep);
714		asoc->control_pdapi->end_added = 1;
715		asoc->control_pdapi->pdapi_aborted = 1;
716		asoc->control_pdapi = NULL;
717		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
718#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
719		so = SCTP_INP_SO(stcb->sctp_ep);
720		atomic_add_int(&stcb->asoc.refcnt, 1);
721		SCTP_TCB_UNLOCK(stcb);
722		SCTP_SOCKET_LOCK(so, 1);
723		SCTP_TCB_LOCK(stcb);
724		atomic_subtract_int(&stcb->asoc.refcnt, 1);
725		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
726			/* assoc was freed while we were unlocked */
727			SCTP_SOCKET_UNLOCK(so, 1);
728			return;
729		}
730#endif
731		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
732#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
733		SCTP_SOCKET_UNLOCK(so, 1);
734#endif
735	}
736	/* goto SHUTDOWN_RECEIVED state to block new requests */
737	if (stcb->sctp_socket) {
738		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
739		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
740		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
741			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
742			/*
743			 * notify upper layer that peer has initiated a
744			 * shutdown
745			 */
746			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
747
748			/* reset time */
749			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
750		}
751	}
752	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
753		/*
754		 * stop the shutdown timer, since we WILL move to
755		 * SHUTDOWN-ACK-SENT.
756		 */
757		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
758	}
759	/* Now is there unsent data on a stream somewhere? */
760	some_on_streamwheel = sctp_is_there_unsent_data(stcb);
761
762	if (!TAILQ_EMPTY(&asoc->send_queue) ||
763	    !TAILQ_EMPTY(&asoc->sent_queue) ||
764	    some_on_streamwheel) {
765		/* By returning we will push more data out */
766		return;
767	} else {
768		/* no outstanding data to send, so move on... */
769		/* send SHUTDOWN-ACK */
770		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
771		/* move to SHUTDOWN-ACK-SENT state */
772		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
773		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
774			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
775		}
776		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
777
778		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
779		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
780		/* start SHUTDOWN timer */
781		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
782		    stcb, net);
783	}
784}
785
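/*
 * SHUTDOWN-ACK handler: only acted on in SHUTDOWN-SENT/SHUTDOWN-ACK-SENT.
 * Reports any leftover outbound data, sends a SHUTDOWN-COMPLETE, notifies
 * the ULP and frees the TCB.
 */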
786static void
787sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
788    struct sctp_tcb *stcb, struct sctp_nets *net)
789{
790	struct sctp_association *asoc;
791
792#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
793	struct socket *so;
794
795	so = SCTP_INP_SO(stcb->sctp_ep);
796#endif
797	SCTPDBG(SCTP_DEBUG_INPUT2,
798	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
799	if (stcb == NULL)
800		return;
801
802	asoc = &stcb->asoc;
803	/* process according to association state */
804	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
805	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
806		/* unexpected SHUTDOWN-ACK... so ignore... */
807		SCTP_TCB_UNLOCK(stcb);
808		return;
809	}
810	if (asoc->control_pdapi) {
811		/*
812		 * With a normal shutdown we assume the end of the last record.
813		 */
814		SCTP_INP_READ_LOCK(stcb->sctp_ep);
815		asoc->control_pdapi->end_added = 1;
816		asoc->control_pdapi->pdapi_aborted = 1;
817		asoc->control_pdapi = NULL;
818		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
819#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
820		atomic_add_int(&stcb->asoc.refcnt, 1);
821		SCTP_TCB_UNLOCK(stcb);
822		SCTP_SOCKET_LOCK(so, 1);
823		SCTP_TCB_LOCK(stcb);
824		atomic_subtract_int(&stcb->asoc.refcnt, 1);
825		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
826			/* assoc was freed while we were unlocked */
827			SCTP_SOCKET_UNLOCK(so, 1);
828			return;
829		}
830#endif
831		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
832#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
833		SCTP_SOCKET_UNLOCK(so, 1);
834#endif
835	}
836	/* are the queues empty? */
837	if (!TAILQ_EMPTY(&asoc->send_queue) ||
838	    !TAILQ_EMPTY(&asoc->sent_queue) ||
839	    !TAILQ_EMPTY(&asoc->out_wheel)) {
840		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
841	}
842	/* stop the timer */
843	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
844	/* send SHUTDOWN-COMPLETE */
845	sctp_send_shutdown_complete(stcb, net);
846	/* notify upper layer protocol */
847	if (stcb->sctp_socket) {
848		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
849		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
850		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
851			/* Set the connected flag to disconnected */
852			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
853		}
854	}
855	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
856	/* free the TCB but first save off the ep */
857#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
858	atomic_add_int(&stcb->asoc.refcnt, 1);
859	SCTP_TCB_UNLOCK(stcb);
860	SCTP_SOCKET_LOCK(so, 1);
861	SCTP_TCB_LOCK(stcb);
862	atomic_subtract_int(&stcb->asoc.refcnt, 1);
863#endif
864	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
865	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
866#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
867	SCTP_SOCKET_UNLOCK(so, 1);
868#endif
869}
870
871/*
872 * Skip past the param header and then we will find the chunk that caused the
873 * problem. There are two possibilities, ASCONF or FWD-TSN; other than that,
874 * our peer must be broken.
875 */
876static void
877sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
878    struct sctp_nets *net)
879{
880	struct sctp_chunkhdr *chk;
881
882	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
883	switch (chk->chunk_type) {
884	case SCTP_ASCONF_ACK:
885	case SCTP_ASCONF:
886		sctp_asconf_cleanup(stcb, net);
887		break;
888	case SCTP_FORWARD_CUM_TSN:
889		stcb->asoc.peer_supports_prsctp = 0;
890		break;
891	default:
892		SCTPDBG(SCTP_DEBUG_INPUT2,
893		    "Peer does not support chunk type %d(%x)??\n",
894		    chk->chunk_type, (uint32_t) chk->chunk_type);
895		break;
896	}
897}
898
899/*
900 * Skip past the param header and then we will find the param that caused the
901 * problem.  There are a number of params in an ASCONF or the PR-SCTP param;
902 * these will turn off specific features.
903 */
904static void
905sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
906{
907	struct sctp_paramhdr *pbad;
908
909	pbad = phdr + 1;
910	switch (ntohs(pbad->param_type)) {
911		/* pr-sctp draft */
912	case SCTP_PRSCTP_SUPPORTED:
913		stcb->asoc.peer_supports_prsctp = 0;
914		break;
915	case SCTP_SUPPORTED_CHUNK_EXT:
916		break;
917		/* draft-ietf-tsvwg-addip-sctp */
918	case SCTP_ECN_NONCE_SUPPORTED:
919		stcb->asoc.peer_supports_ecn_nonce = 0;
920		stcb->asoc.ecn_nonce_allowed = 0;
921		stcb->asoc.ecn_allowed = 0;
922		break;
923	case SCTP_ADD_IP_ADDRESS:
924	case SCTP_DEL_IP_ADDRESS:
925	case SCTP_SET_PRIM_ADDR:
926		stcb->asoc.peer_supports_asconf = 0;
927		break;
928	case SCTP_SUCCESS_REPORT:
929	case SCTP_ERROR_CAUSE_IND:
930		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
931		SCTPDBG(SCTP_DEBUG_INPUT2,
932		    "Turning off ASCONF to this strange peer\n");
933		stcb->asoc.peer_supports_asconf = 0;
934		break;
935	default:
936		SCTPDBG(SCTP_DEBUG_INPUT2,
937		    "Peer does not support param type %d(%x)??\n",
938		    pbad->param_type, (uint32_t) pbad->param_type);
939		break;
940	}
941}
942
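/*
 * OPERATION-ERROR handler: walk every error cause in the chunk and react
 * to the ones we care about (stale cookie retry, unrecognized chunk/param
 * feature fallback); unknown causes are only logged. Returns -1 only when
 * a stale cookie forces the association to be freed.
 */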
943static int
944sctp_handle_error(struct sctp_chunkhdr *ch,
945    struct sctp_tcb *stcb, struct sctp_nets *net)
946{
947	int chklen;
948	struct sctp_paramhdr *phdr;
949	uint16_t error_type;
950	uint16_t error_len;
951	struct sctp_association *asoc;
952	int adjust;
953
954#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
955	struct socket *so;
956
957#endif
958
959	/* parse through all of the errors and process */
960	asoc = &stcb->asoc;
961	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
962	    sizeof(struct sctp_chunkhdr));
963	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
964	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
965		/* Process an Error Cause */
966		error_type = ntohs(phdr->param_type);
967		error_len = ntohs(phdr->param_length);
968		if ((error_len > chklen) || (error_len == 0)) {
969			/* invalid param length for this param */
970			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
971			    chklen, error_len);
972			return (0);
973		}
974		switch (error_type) {
975		case SCTP_CAUSE_INVALID_STREAM:
976		case SCTP_CAUSE_MISSING_PARAM:
977		case SCTP_CAUSE_INVALID_PARAM:
978		case SCTP_CAUSE_NO_USER_DATA:
979			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
980			    error_type);
981			break;
982		case SCTP_CAUSE_STALE_COOKIE:
983			/*
984			 * We only act if we have echoed a cookie and are
985			 * waiting.
986			 */
987			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
988				int *p;
989
990				p = (int *)((caddr_t)phdr + sizeof(*phdr));
991				/* Save the time doubled */
992				asoc->cookie_preserve_req = ntohl(*p) << 1;
993				asoc->stale_cookie_count++;
994				if (asoc->stale_cookie_count >
995				    asoc->max_init_times) {
996					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
997					/* now free the asoc */
998#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
999					so = SCTP_INP_SO(stcb->sctp_ep);
1000					atomic_add_int(&stcb->asoc.refcnt, 1);
1001					SCTP_TCB_UNLOCK(stcb);
1002					SCTP_SOCKET_LOCK(so, 1);
1003					SCTP_TCB_LOCK(stcb);
1004					atomic_subtract_int(&stcb->asoc.refcnt, 1);
1005#endif
1006					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1007					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1008#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1009					SCTP_SOCKET_UNLOCK(so, 1);
1010#endif
1011					return (-1);
1012				}
1013				/* blast back to INIT state */
1014				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
1015				asoc->state |= SCTP_STATE_COOKIE_WAIT;
1016
1017				sctp_stop_all_cookie_timers(stcb);
1018				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1019			}
1020			break;
1021		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1022			/*
1023			 * Nothing we can do here, we don't do hostname
1024			 * addresses so if the peer does not like my IPv6
1025			 * (or IPv4 for that matter) it does not matter. If
1026			 * they don't support that type of address, they can
1027			 * NOT possibly get that packet type... i.e. with no
1028			 * IPv6 you can't receive an IPv6 packet, so we can
1029			 * safely ignore this one. If we ever added support
1030			 * for HOSTNAME Addresses, then we would need to do
1031			 * something here.
1032			 */
1033			break;
1034		case SCTP_CAUSE_UNRECOG_CHUNK:
1035			sctp_process_unrecog_chunk(stcb, phdr, net);
1036			break;
1037		case SCTP_CAUSE_UNRECOG_PARAM:
1038			sctp_process_unrecog_param(stcb, phdr);
1039			break;
1040		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1041			/*
1042			 * We ignore this since the timer will drive out a
1043			 * new cookie anyway and their timer will drive us
1044			 * to send a SHUTDOWN_COMPLETE. We can't send one
1045			 * here since we don't have their tag.
1046			 */
1047			break;
1048		case SCTP_CAUSE_DELETING_LAST_ADDR:
1049		case SCTP_CAUSE_RESOURCE_SHORTAGE:
1050		case SCTP_CAUSE_DELETING_SRC_ADDR:
1051			/*
1052			 * We should NOT get these here, but in an
1053			 * ASCONF-ACK.
1054			 */
1055			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
1056			    error_type);
1057			break;
1058		case SCTP_CAUSE_OUT_OF_RESC:
1059			/*
1060			 * And what, pray tell, do we do with the fact that
1061			 * the peer is out of resources? Not really sure we
1062			 * could do anything but abort. I suspect this
1063			 * should have come WITH an abort instead of in an
1064			 * OP-ERROR.
1065			 */
1066			break;
1067		default:
1068			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
1069			    error_type);
1070			break;
1071		}
1072		adjust = SCTP_SIZE32(error_len);
1073		chklen -= adjust;
1074		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
1075	}
1076	return (0);
1077}
1078
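/*
 * INIT-ACK handler: sanity-check the chunk and, in COOKIE-WAIT, hand it to
 * sctp_process_init_ack() and move to COOKIE-ECHOED; in any other state
 * the chunk is discarded.
 */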
1079static int
1080sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1081    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1082    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
1083{
1084	struct sctp_init_ack *init_ack;
1085	struct mbuf *op_err;
1086
1087	SCTPDBG(SCTP_DEBUG_INPUT2,
1088	    "sctp_handle_init_ack: handling INIT-ACK\n");
1089
1090	if (stcb == NULL) {
1091		SCTPDBG(SCTP_DEBUG_INPUT2,
1092		    "sctp_handle_init_ack: TCB is null\n");
1093		return (-1);
1094	}
1095	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1096		/* Invalid length */
1097		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1098		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1099		    op_err, 0);
1100		*abort_no_unlock = 1;
1101		return (-1);
1102	}
1103	init_ack = &cp->init;
1104	/* validate parameters */
1105	if (init_ack->initiate_tag == 0) {
1106		/* protocol error... send an abort */
1107		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1108		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1109		    op_err, 0);
1110		*abort_no_unlock = 1;
1111		return (-1);
1112	}
1113	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1114		/* protocol error... send an abort */
1115		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1116		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1117		    op_err, 0);
1118		*abort_no_unlock = 1;
1119		return (-1);
1120	}
1121	if (init_ack->num_inbound_streams == 0) {
1122		/* protocol error... send an abort */
1123		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1124		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1125		    op_err, 0);
1126		*abort_no_unlock = 1;
1127		return (-1);
1128	}
1129	if (init_ack->num_outbound_streams == 0) {
1130		/* protocol error... send an abort */
1131		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
1132		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
1133		    op_err, 0);
1134		*abort_no_unlock = 1;
1135		return (-1);
1136	}
1137	/* process according to association state... */
1138	switch (stcb->asoc.state & SCTP_STATE_MASK) {
1139	case SCTP_STATE_COOKIE_WAIT:
1140		/* this is the expected state for this chunk */
1141		/* process the INIT-ACK parameters */
1142		if (stcb->asoc.primary_destination->dest_state &
1143		    SCTP_ADDR_UNCONFIRMED) {
1144			/*
1145			 * The primary is where we sent the INIT, we can
1146			 * always consider it confirmed when the INIT-ACK is
1147			 * returned. Do this before we load addresses
1148			 * though.
1149			 */
1150			stcb->asoc.primary_destination->dest_state &=
1151			    ~SCTP_ADDR_UNCONFIRMED;
1152			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1153			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1154		}
1155		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
1156		    net, abort_no_unlock, vrf_id) < 0) {
1157			/* error in parsing parameters */
1158			return (-1);
1159		}
1160		/* update our state */
1161		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1162		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
1163
1164		/* reset the RTO calc */
1165		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
1166			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1167			    stcb->asoc.overall_error_count,
1168			    0,
1169			    SCTP_FROM_SCTP_INPUT,
1170			    __LINE__);
1171		}
1172		stcb->asoc.overall_error_count = 0;
1173		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1174		/*
1175		 * collapse the init timer back in case of an exponential
1176		 * backoff
1177		 */
1178		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1179		    stcb, net);
1180		/*
1181		 * the send at the end of the inbound data processing will
1182		 * cause the cookie to be sent
1183		 */
1184		break;
1185	case SCTP_STATE_SHUTDOWN_SENT:
1186		/* incorrect state... discard */
1187		break;
1188	case SCTP_STATE_COOKIE_ECHOED:
1189		/* incorrect state... discard */
1190		break;
1191	case SCTP_STATE_OPEN:
1192		/* incorrect state... discard */
1193		break;
1194	case SCTP_STATE_EMPTY:
1195	case SCTP_STATE_INUSE:
1196	default:
1197		/* incorrect state... discard */
1198		return (-1);
1199		break;
1200	}
1201	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1202	return (0);
1203}
1204
1205
1206/*
1207 * handle a state cookie for an existing association.
1208 * m: input packet mbuf chain -- assumes a pullup on the IP/SCTP/COOKIE-ECHO
1209 * chunk; note: this is a "split" mbuf and the cookie signature does not exist.
1210 * offset: offset into the mbuf to the cookie-echo chunk
1211 */
1212static struct sctp_tcb *
1213sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1214    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1215    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
1216    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
1217    uint32_t vrf_id)
1218{
1219	struct sctp_association *asoc;
1220	struct sctp_init_chunk *init_cp, init_buf;
1221	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1222	int chk_length;
1223	int init_offset, initack_offset, i;
1224	int retval;
1225	int spec_flag = 0;
1226	uint32_t how_indx;
1227
1228	/* I know that the TCB is non-NULL from the caller */
1229	asoc = &stcb->asoc;
1230	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1231		if (asoc->cookie_how[how_indx] == 0)
1232			break;
1233	}
1234	if (how_indx < sizeof(asoc->cookie_how)) {
1235		asoc->cookie_how[how_indx] = 1;
1236	}
1237	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1238		/* SHUTDOWN came in after sending INIT-ACK */
1239		struct mbuf *op_err;
1240		struct sctp_paramhdr *ph;
1241
1242		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1243		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1244		    0, M_DONTWAIT, 1, MT_DATA);
1245		if (op_err == NULL) {
1246			/* FOOBAR */
1247			return (NULL);
1248		}
1249		/* pre-reserve some space */
1250		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1251		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1252		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1253		/* Set the len */
1254		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1255		ph = mtod(op_err, struct sctp_paramhdr *);
1256		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1257		ph->param_length = htons(sizeof(struct sctp_paramhdr));
1258		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1259		    vrf_id);
1260		if (how_indx < sizeof(asoc->cookie_how))
1261			asoc->cookie_how[how_indx] = 2;
1262		return (NULL);
1263	}
1264	/*
1265	 * find and validate the INIT chunk in the cookie (peer's info); the
1266	 * INIT should start after the cookie-echo header struct (chunk
1267	 * header, state cookie header struct)
1268	 */
1269	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1270
1271	init_cp = (struct sctp_init_chunk *)
1272	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1273	    (uint8_t *) & init_buf);
1274	if (init_cp == NULL) {
1275		/* could not pull a INIT chunk in cookie */
1276		return (NULL);
1277	}
1278	chk_length = ntohs(init_cp->ch.chunk_length);
1279	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1280		return (NULL);
1281	}
1282	/*
1283	 * find and validate the INIT-ACK chunk in the cookie (my info); the
1284	 * INIT-ACK follows the INIT chunk
1285	 */
1286	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1287	initack_cp = (struct sctp_init_ack_chunk *)
1288	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1289	    (uint8_t *) & initack_buf);
1290	if (initack_cp == NULL) {
1291		/* could not pull INIT-ACK chunk in cookie */
1292		return (NULL);
1293	}
1294	chk_length = ntohs(initack_cp->ch.chunk_length);
1295	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1296		return (NULL);
1297	}
1298	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1299	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1300		/*
1301		 * case D in Section 5.2.4 Table 2: MMAA; process accordingly
1302		 * to get into the OPEN state
1303		 */
1304		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1305			/*-
1306			 * Oops, this means that we somehow generated two vtags
1307			 * the same. I.e. we did:
1308			 *  Us               Peer
1309			 *   <---INIT(tag=a)------
1310			 *   ----INIT-ACK(tag=t)-->
1311			 *   ----INIT(tag=t)------> *1
1312			 *   <---INIT-ACK(tag=a)---
1313                         *   <----CE(tag=t)------------- *2
1314			 *
1315			 * At point *1 we should be generating a different
1316			 * tag t', which means we would throw away the CE and send
1317			 * ours instead. Basically this is case C (throw away side).
1318			 */
1319			if (how_indx < sizeof(asoc->cookie_how))
1320				asoc->cookie_how[how_indx] = 17;
1321			return (NULL);
1322
1323		}
1324		switch SCTP_GET_STATE
1325			(asoc) {
1326		case SCTP_STATE_COOKIE_WAIT:
1327		case SCTP_STATE_COOKIE_ECHOED:
1328			/*
1329			 * INIT was sent but got a COOKIE_ECHO with the
1330			 * correct tags... just accept it... but we must
1331			 * process the init so that we can make sure we have
1332			 * the right seq no's.
1333			 */
1334			/* First we must process the INIT !! */
1335			retval = sctp_process_init(init_cp, stcb, net);
1336			if (retval < 0) {
1337				if (how_indx < sizeof(asoc->cookie_how))
1338					asoc->cookie_how[how_indx] = 3;
1339				return (NULL);
1340			}
1341			/* we have already processed the INIT so no problem */
1342			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1343			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1344			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1345			/* update current state */
1346			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1347				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1348			else
1349				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1350
1351			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1352			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1353				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1354				    stcb->sctp_ep, stcb, asoc->primary_destination);
1355			}
1356			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1357			sctp_stop_all_cookie_timers(stcb);
1358			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1359			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1360			    (inp->sctp_socket->so_qlimit == 0)
1361			    ) {
1362#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1363				struct socket *so;
1364
1365#endif
1366				/*
1367				 * Here is where collision would go if we
1368				 * did a connect() and instead got an
1369				 * init/init-ack/cookie done before the
1370				 * init-ack came back.
1371				 */
1372				stcb->sctp_ep->sctp_flags |=
1373				    SCTP_PCB_FLAGS_CONNECTED;
1374#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1375				so = SCTP_INP_SO(stcb->sctp_ep);
1376				atomic_add_int(&stcb->asoc.refcnt, 1);
1377				SCTP_TCB_UNLOCK(stcb);
1378				SCTP_SOCKET_LOCK(so, 1);
1379				SCTP_TCB_LOCK(stcb);
1380				atomic_add_int(&stcb->asoc.refcnt, -1);
1381				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1382					SCTP_SOCKET_UNLOCK(so, 1);
1383					return (NULL);
1384				}
1385#endif
1386				soisconnected(stcb->sctp_socket);
1387#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1388				SCTP_SOCKET_UNLOCK(so, 1);
1389#endif
1390			}
1391			/* notify upper layer */
1392			*notification = SCTP_NOTIFY_ASSOC_UP;
1393			/*
1394			 * since we did not send a HB make sure we don't
1395			 * double things
1396			 */
1397			net->hb_responded = 1;
1398			net->RTO = sctp_calculate_rto(stcb, asoc, net,
1399			    &cookie->time_entered, sctp_align_unsafe_makecopy);
1400
1401			if (stcb->asoc.sctp_autoclose_ticks &&
1402			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1403				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1404				    inp, stcb, NULL);
1405			}
1406			break;
1407		default:
1408			/*
1409			 * we're in the OPEN state (or beyond), so peer must
1410			 * have simply lost the COOKIE-ACK
1411			 */
1412			break;
1413			}	/* end switch */
1414		sctp_stop_all_cookie_timers(stcb);
1415		/*
1416		 * We ignore the return code here.. not sure if we should
1417		 * somehow abort.. but we do have an existing asoc. This
1418		 * really should not fail.
1419		 */
1420		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1421		    init_offset + sizeof(struct sctp_init_chunk),
1422		    initack_offset, sh, init_src)) {
1423			if (how_indx < sizeof(asoc->cookie_how))
1424				asoc->cookie_how[how_indx] = 4;
1425			return (NULL);
1426		}
1427		/* respond with a COOKIE-ACK */
1428		sctp_toss_old_cookies(stcb, asoc);
1429		sctp_send_cookie_ack(stcb);
1430		if (how_indx < sizeof(asoc->cookie_how))
1431			asoc->cookie_how[how_indx] = 5;
1432		return (stcb);
1433	}
1434	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1435	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1436	    cookie->tie_tag_my_vtag == 0 &&
1437	    cookie->tie_tag_peer_vtag == 0) {
1438		/*
1439		 * case C in Section 5.2.4 Table 2: XMOO silently discard
1440		 */
1441		if (how_indx < sizeof(asoc->cookie_how))
1442			asoc->cookie_how[how_indx] = 6;
1443		return (NULL);
1444	}
1445	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
1446	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
1447	    init_cp->init.initiate_tag == 0)) {
1448		/*
1449		 * case B in Section 5.2.4 Table 2: MXAA or MOAA; my info
1450		 * should be ok, re-accept peer info
1451		 */
1452		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1453			/*
1454			 * Extension of case C. If we hit this, then the
1455			 * random number generator returned the same vtag
1456			 * when we first sent our INIT-ACK and when we later
1457			 * sent our INIT. The side with the seq numbers that
1458			 * are different will be the one that normally
1459			 * would have hit case C. This in effect "extends"
1460			 * our vtags in this collision case to be 64 bits.
1461			 * The same collision could occur aka you get both
1462			 * vtag and seq number the same twice in a row.. but
1463			 * is much less likely. If it did happen then we
1464			 * would proceed through and bring up the assoc.. we
1465			 * may end up with the wrong stream setup however..
1466			 * which would be bad.. but there is no way to
1467			 * tell.. until we send on a stream that does not
1468			 * exist :-)
1469			 */
1470			if (how_indx < sizeof(asoc->cookie_how))
1471				asoc->cookie_how[how_indx] = 7;
1472
1473			return (NULL);
1474		}
1475		if (how_indx < sizeof(asoc->cookie_how))
1476			asoc->cookie_how[how_indx] = 8;
1477		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1478		sctp_stop_all_cookie_timers(stcb);
1479		/*
1480		 * since we did not send a HB make sure we don't double
1481		 * things
1482		 */
1483		net->hb_responded = 1;
1484		if (stcb->asoc.sctp_autoclose_ticks &&
1485		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1486			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1487			    NULL);
1488		}
1489		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1490		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1491
1492		/* Note last_cwr_tsn? where is this used? */
1493		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1494		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1495			/*
1496			 * Ok the peer probably discarded our data (if we
1497			 * echoed a cookie+data). So anything on the
1498			 * sent_queue should be marked for retransmit, we
1499			 * may not get something to kick us so it COULD
1500			 * still take a timeout to move these.. but it can't
1501			 * hurt to mark them.
1502			 */
1503			struct sctp_tmit_chunk *chk;
1504
1505			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1506				if (chk->sent < SCTP_DATAGRAM_RESEND) {
1507					chk->sent = SCTP_DATAGRAM_RESEND;
1508					sctp_flight_size_decrease(chk);
1509					sctp_total_flight_decrease(stcb, chk);
1510					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1511					spec_flag++;
1512				}
1513			}
1514
1515		}
1516		/* process the INIT info (peer's info) */
1517		retval = sctp_process_init(init_cp, stcb, net);
1518		if (retval < 0) {
1519			if (how_indx < sizeof(asoc->cookie_how))
1520				asoc->cookie_how[how_indx] = 9;
1521			return (NULL);
1522		}
1523		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1524		    init_offset + sizeof(struct sctp_init_chunk),
1525		    initack_offset, sh, init_src)) {
1526			if (how_indx < sizeof(asoc->cookie_how))
1527				asoc->cookie_how[how_indx] = 10;
1528			return (NULL);
1529		}
1530		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1531		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1532			*notification = SCTP_NOTIFY_ASSOC_UP;
1533
1534			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1535			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1536			    (inp->sctp_socket->so_qlimit == 0)) {
1537#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1538				struct socket *so;
1539
1540#endif
1541				stcb->sctp_ep->sctp_flags |=
1542				    SCTP_PCB_FLAGS_CONNECTED;
1543#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1544				so = SCTP_INP_SO(stcb->sctp_ep);
1545				atomic_add_int(&stcb->asoc.refcnt, 1);
1546				SCTP_TCB_UNLOCK(stcb);
1547				SCTP_SOCKET_LOCK(so, 1);
1548				SCTP_TCB_LOCK(stcb);
1549				atomic_add_int(&stcb->asoc.refcnt, -1);
1550				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1551					SCTP_SOCKET_UNLOCK(so, 1);
1552					return (NULL);
1553				}
1554#endif
1555				soisconnected(stcb->sctp_socket);
1556#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1557				SCTP_SOCKET_UNLOCK(so, 1);
1558#endif
1559			}
1560			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1561				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1562			else
1563				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1564			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1565		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1566			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1567		} else {
1568			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1569		}
1570		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1571		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1572			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1573			    stcb->sctp_ep, stcb, asoc->primary_destination);
1574		}
1575		sctp_stop_all_cookie_timers(stcb);
1576		sctp_toss_old_cookies(stcb, asoc);
1577		sctp_send_cookie_ack(stcb);
1578		if (spec_flag) {
1579			/*
1580			 * only if we have retrans set do we do this. What
1581			 * Only if we have retrans set do we do this. What
1582			 * this call does is get only the COOKIE-ACK out;
1583			 * then, when we return, the normal call to
1584			 * sctp_chunk_output will get the retrans out behind
1585			 * this.
1586			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1587		}
1588		if (how_indx < sizeof(asoc->cookie_how))
1589			asoc->cookie_how[how_indx] = 11;
1590
1591		return (stcb);
1592	}
1593	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1594	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1595	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1596	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1597	    cookie->tie_tag_peer_vtag != 0) {
1598		struct sctpasochead *head;
1599
1600		/*
1601		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1602		 */
1603		/* temp code */
1604		if (how_indx < sizeof(asoc->cookie_how))
1605			asoc->cookie_how[how_indx] = 12;
1606		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1607		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1608
1609		*sac_assoc_id = sctp_get_associd(stcb);
1610		/* notify upper layer */
1611		*notification = SCTP_NOTIFY_ASSOC_RESTART;
1612		atomic_add_int(&stcb->asoc.refcnt, 1);
1613		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1614		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1615		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1616			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1617		}
1618		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1619			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1620		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1621			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1622		}
1623		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1624			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1625			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1626			    stcb->sctp_ep, stcb, asoc->primary_destination);
1627
1628		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1629			/* move to OPEN state, if not in SHUTDOWN_SENT */
1630			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1631		}
1632		asoc->pre_open_streams =
1633		    ntohs(initack_cp->init.num_outbound_streams);
1634		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1635		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1636
1637		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1638		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1639
1640		asoc->str_reset_seq_in = asoc->init_seq_number;
1641
1642		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1643		if (asoc->mapping_array) {
1644			memset(asoc->mapping_array, 0,
1645			    asoc->mapping_array_size);
1646		}
1647		SCTP_TCB_UNLOCK(stcb);
1648		SCTP_INP_INFO_WLOCK();
1649		SCTP_INP_WLOCK(stcb->sctp_ep);
1650		SCTP_TCB_LOCK(stcb);
1651		atomic_add_int(&stcb->asoc.refcnt, -1);
1652		/* send up all the data */
1653		SCTP_TCB_SEND_LOCK(stcb);
1654
1655		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
1656		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1657			stcb->asoc.strmout[i].stream_no = i;
1658			stcb->asoc.strmout[i].next_sequence_sent = 0;
1659			stcb->asoc.strmout[i].last_msg_incomplete = 0;
1660		}
1661		/* process the INIT-ACK info (my info) */
1662		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1663		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1664
1665		/* pull from vtag hash */
1666		LIST_REMOVE(stcb, sctp_asocs);
1667		/* re-insert to new vtag position */
1668		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1669		    sctppcbinfo.hashasocmark)];
1670		/*
1671		 * put it in the bucket in the vtag hash of assoc's for the
1672		 * system
1673		 */
1674		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1675
1676		/* Is this the first restart? */
1677		if (stcb->asoc.in_restart_hash == 0) {
1678			/* Ok add it to assoc_id vtag hash */
1679			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
1680			    sctppcbinfo.hashrestartmark)];
1681			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
1682			stcb->asoc.in_restart_hash = 1;
1683		}
1684		/* process the INIT info (peer's info) */
1685		SCTP_TCB_SEND_UNLOCK(stcb);
1686		SCTP_INP_WUNLOCK(stcb->sctp_ep);
1687		SCTP_INP_INFO_WUNLOCK();
1688
1689		retval = sctp_process_init(init_cp, stcb, net);
1690		if (retval < 0) {
1691			if (how_indx < sizeof(asoc->cookie_how))
1692				asoc->cookie_how[how_indx] = 13;
1693
1694			return (NULL);
1695		}
1696		/*
1697		 * since we did not send a HB make sure we don't double
1698		 * things
1699		 */
1700		net->hb_responded = 1;
1701
1702		if (sctp_load_addresses_from_init(stcb, m, iphlen,
1703		    init_offset + sizeof(struct sctp_init_chunk),
1704		    initack_offset, sh, init_src)) {
1705			if (how_indx < sizeof(asoc->cookie_how))
1706				asoc->cookie_how[how_indx] = 14;
1707
1708			return (NULL);
1709		}
1710		/* respond with a COOKIE-ACK */
1711		sctp_stop_all_cookie_timers(stcb);
1712		sctp_toss_old_cookies(stcb, asoc);
1713		sctp_send_cookie_ack(stcb);
1714		if (how_indx < sizeof(asoc->cookie_how))
1715			asoc->cookie_how[how_indx] = 15;
1716
1717		return (stcb);
1718	}
1719	if (how_indx < sizeof(asoc->cookie_how))
1720		asoc->cookie_how[how_indx] = 16;
1721	/* all other cases... */
1722	return (NULL);
1723}
1724
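/*
 * Editor's note (not part of the original source): the vtag/tie-tag
 * checks in sctp_process_cookie_existing() above walk RFC 4960,
 * Section 5.2.4, Table 2.  The stand-alone sketch below is a minimal
 * illustration of that classification; the enum and function names are
 * hypothetical, and the tie-tags are compared against the vtag nonces
 * exactly as in the "case A" (peer restart) branch above.
 */
enum cookie_tcb_case { COOKIE_CASE_NONE, COOKIE_CASE_A_RESTART,
	COOKIE_CASE_B_COLLISION, COOKIE_CASE_C_STALE, COOKIE_CASE_D_DUP };

static enum cookie_tcb_case
classify_cookie_vs_tcb(uint32_t tcb_my_vtag, uint32_t tcb_peer_vtag,
    uint32_t ck_local_tag, uint32_t ck_peer_tag,
    uint32_t ck_local_tie, uint32_t ck_peer_tie,
    uint32_t my_vtag_nonce, uint32_t peer_vtag_nonce)
{
	int local_match = (ck_local_tag == tcb_my_vtag);
	int peer_match = (ck_peer_tag == tcb_peer_vtag);

	if (local_match && peer_match)
		return (COOKIE_CASE_D_DUP);	/* duplicate COOKIE-ECHO */
	if (local_match && !peer_match)
		return (COOKIE_CASE_B_COLLISION);	/* colliding INITs */
	if (!local_match && !peer_match &&
	    (ck_local_tie == my_vtag_nonce) &&
	    (ck_peer_tie == peer_vtag_nonce) && (ck_peer_tie != 0))
		return (COOKIE_CASE_A_RESTART);	/* peer restarted */
	if (!local_match && peer_match &&
	    (ck_local_tie == 0) && (ck_peer_tie == 0))
		return (COOKIE_CASE_C_STALE);	/* stale cookie, discard */
	return (COOKIE_CASE_NONE);	/* all other cases are dropped */
}
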
1725
1726/*
1727 * handle a state cookie for a new association
1728 *   m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO
1729 *      chunk (a "split" mbuf; the cookie signature does not exist)
1730 *   offset: offset into mbuf to the cookie-echo chunk
1731 *   length: length of the cookie chunk; to: where the init was from; returns a new TCB
1732 */
1733static struct sctp_tcb *
1734sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1735    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1736    struct sctp_inpcb *inp, struct sctp_nets **netp,
1737    struct sockaddr *init_src, int *notification,
1738    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1739    uint32_t vrf_id)
1740{
1741	struct sctp_tcb *stcb;
1742	struct sctp_init_chunk *init_cp, init_buf;
1743	struct sctp_init_ack_chunk *initack_cp, initack_buf;
1744	struct sockaddr_storage sa_store;
1745	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1746	struct sockaddr_in *sin;
1747	struct sockaddr_in6 *sin6;
1748	struct sctp_association *asoc;
1749	int chk_length;
1750	int init_offset, initack_offset, initack_limit;
1751	int retval;
1752	int error = 0;
1753	uint32_t old_tag;
1754	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1755
1756#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1757	struct socket *so;
1758
1759	so = SCTP_INP_SO(inp);
1760#endif
1761
1762	/*
1763	 * find and validate the INIT chunk in the cookie (peer's info) the
1764	 * INIT should start after the cookie-echo header struct (chunk
1765	 * header, state cookie header struct)
1766	 */
1767	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1768	init_cp = (struct sctp_init_chunk *)
1769	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1770	    (uint8_t *) & init_buf);
1771	if (init_cp == NULL) {
1772		/* could not pull an INIT chunk from the cookie */
1773		SCTPDBG(SCTP_DEBUG_INPUT1,
1774		    "process_cookie_new: could not pull INIT chunk hdr\n");
1775		return (NULL);
1776	}
1777	chk_length = ntohs(init_cp->ch.chunk_length);
1778	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1779		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
1780		return (NULL);
1781	}
1782	initack_offset = init_offset + SCTP_SIZE32(chk_length);
1783	/*
1784	 * find and validate the INIT-ACK chunk in the cookie (my info) the
1785	 * INIT-ACK follows the INIT chunk
1786	 */
1787	initack_cp = (struct sctp_init_ack_chunk *)
1788	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1789	    (uint8_t *) & initack_buf);
1790	if (initack_cp == NULL) {
1791		/* could not pull INIT-ACK chunk in cookie */
1792		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1793		return (NULL);
1794	}
1795	chk_length = ntohs(initack_cp->ch.chunk_length);
1796	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1797		return (NULL);
1798	}
1799	/*
1800	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1801	 * "initack_limit" value.  This is because the chk_length field
1802	 * includes the length of the cookie, but the cookie is omitted when
1803	 * the INIT and INIT_ACK are tacked onto the cookie...
1804	 */
1805	initack_limit = offset + cookie_len;
1806
1807	/*
1808	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
1809	 * and popluate
1810	 * and populate it
1811
1812	/*
1813	 * Here we do a trick, we set in NULL for the proc/thread argument.
1814	 * We do this since in effect we only use the p argument when the
1815	 * socket is unbound and we must do an implicit bind. Since we are
1816	 * getting a cookie, we cannot be unbound.
1817	 */
1818	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1819	    ntohl(initack_cp->init.initiate_tag), vrf_id,
1820	    (struct thread *)NULL
1821	    );
1822	if (stcb == NULL) {
1823		struct mbuf *op_err;
1824
1825		/* memory problem? */
1826		SCTPDBG(SCTP_DEBUG_INPUT1,
1827		    "process_cookie_new: no room for another TCB!\n");
1828		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1829
1830		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1831		    sh, op_err, vrf_id);
1832		return (NULL);
1833	}
1834	/* get the correct sctp_nets */
1835	if (netp)
1836		*netp = sctp_findnet(stcb, init_src);
1837
1838	asoc = &stcb->asoc;
1839	/* get scope variables out of cookie */
1840	asoc->ipv4_local_scope = cookie->ipv4_scope;
1841	asoc->site_scope = cookie->site_scope;
1842	asoc->local_scope = cookie->local_scope;
1843	asoc->loopback_scope = cookie->loopback_scope;
1844
1845	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1846	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1847		struct mbuf *op_err;
1848
1849		/*
1850		 * Houston we have a problem. The EP changed while the
1851		 * cookie was in flight. Only recourse is to abort the
1852		 * association.
1853		 */
1854		atomic_add_int(&stcb->asoc.refcnt, 1);
1855		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1856		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1857		    sh, op_err, vrf_id);
1858#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1859		SCTP_TCB_UNLOCK(stcb);
1860		SCTP_SOCKET_LOCK(so, 1);
1861		SCTP_TCB_LOCK(stcb);
1862#endif
1863		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1864		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1865#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1866		SCTP_SOCKET_UNLOCK(so, 1);
1867#endif
1868		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1869		return (NULL);
1870	}
1871	/* process the INIT-ACK info (my info) */
1872	old_tag = asoc->my_vtag;
1873	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1874	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1875	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1876	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1877	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1878	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1879	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1880	asoc->str_reset_seq_in = asoc->init_seq_number;
1881
1882	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1883
1884	/* process the INIT info (peer's info) */
1885	if (netp)
1886		retval = sctp_process_init(init_cp, stcb, *netp);
1887	else
1888		retval = 0;
1889	if (retval < 0) {
1890		atomic_add_int(&stcb->asoc.refcnt, 1);
1891#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1892		SCTP_TCB_UNLOCK(stcb);
1893		SCTP_SOCKET_LOCK(so, 1);
1894		SCTP_TCB_LOCK(stcb);
1895#endif
1896		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1897#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1898		SCTP_SOCKET_UNLOCK(so, 1);
1899#endif
1900		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1901		return (NULL);
1902	}
1903	/* load all addresses */
1904	if (sctp_load_addresses_from_init(stcb, m, iphlen,
1905	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
1906	    init_src)) {
1907		atomic_add_int(&stcb->asoc.refcnt, 1);
1908#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1909		SCTP_TCB_UNLOCK(stcb);
1910		SCTP_SOCKET_LOCK(so, 1);
1911		SCTP_TCB_LOCK(stcb);
1912#endif
1913		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1914#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1915		SCTP_SOCKET_UNLOCK(so, 1);
1916#endif
1917		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1918		return (NULL);
1919	}
1920	/*
1921	 * verify any preceding AUTH chunk that was skipped
1922	 */
1923	/* pull the local authentication parameters from the cookie/init-ack */
1924	sctp_auth_get_cookie_params(stcb, m,
1925	    initack_offset + sizeof(struct sctp_init_ack_chunk),
1926	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
1927	if (auth_skipped) {
1928		struct sctp_auth_chunk *auth;
1929
1930		auth = (struct sctp_auth_chunk *)
1931		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1932		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1933			/* auth HMAC failed, dump the assoc and packet */
1934			SCTPDBG(SCTP_DEBUG_AUTH1,
1935			    "COOKIE-ECHO: AUTH failed\n");
1936			atomic_add_int(&stcb->asoc.refcnt, 1);
1937#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1938			SCTP_TCB_UNLOCK(stcb);
1939			SCTP_SOCKET_LOCK(so, 1);
1940			SCTP_TCB_LOCK(stcb);
1941#endif
1942			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1943#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1944			SCTP_SOCKET_UNLOCK(so, 1);
1945#endif
1946			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1947			return (NULL);
1948		} else {
1949			/* remaining chunks checked... good to go */
1950			stcb->asoc.authenticated = 1;
1951		}
1952	}
1953	/* update current state */
1954	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1955	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1956	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1957		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1958		    stcb->sctp_ep, stcb, asoc->primary_destination);
1959	}
1960	sctp_stop_all_cookie_timers(stcb);
1961	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1962	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1963
1964	/*
1965	 * If we're doing ASCONFs, check to see if we have any new local
1966	 * addresses that need to get added to the peer (e.g. addresses
1967	 * changed while the cookie echo was in flight).  This needs to be
1968	 * done after we go to the OPEN state to do the correct ASCONF
1969	 * processing.  Otherwise, make sure we have the correct addresses
1970	 * in our lists.
1971	 */
1972
1973	/* warning, we re-use sin, sin6, sa_store here! */
1974	/* pull in local_address (our "from" address) */
1975	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1976		/* source addr is IPv4 */
1977		sin = (struct sockaddr_in *)initack_src;
1978		memset(sin, 0, sizeof(*sin));
1979		sin->sin_family = AF_INET;
1980		sin->sin_len = sizeof(struct sockaddr_in);
1981		sin->sin_addr.s_addr = cookie->laddress[0];
1982	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1983		/* source addr is IPv6 */
1984		sin6 = (struct sockaddr_in6 *)initack_src;
1985		memset(sin6, 0, sizeof(*sin6));
1986		sin6->sin6_family = AF_INET6;
1987		sin6->sin6_len = sizeof(struct sockaddr_in6);
1988		sin6->sin6_scope_id = cookie->scope_id;
1989		memcpy(&sin6->sin6_addr, cookie->laddress,
1990		    sizeof(sin6->sin6_addr));
1991	} else {
1992		atomic_add_int(&stcb->asoc.refcnt, 1);
1993#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1994		SCTP_TCB_UNLOCK(stcb);
1995		SCTP_SOCKET_LOCK(so, 1);
1996		SCTP_TCB_LOCK(stcb);
1997#endif
1998		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
1999#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2000		SCTP_SOCKET_UNLOCK(so, 1);
2001#endif
2002		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2003		return (NULL);
2004	}
2005
2006	/* set up to notify upper layer */
2007	*notification = SCTP_NOTIFY_ASSOC_UP;
2008	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2009	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2010	    (inp->sctp_socket->so_qlimit == 0)) {
2011		/*
2012		 * This is an endpoint that called connect(); how it got a
2013		 * cookie that is NEW is a bit of a mystery.  It must be that
2014		 * the INIT was sent, but before it got there a complete
2015		 * INIT/INIT-ACK/COOKIE exchange arrived.  Of course it then
2016		 * should have gone to the other code path, not here, but a
2017		 * bit of protection is worth having.
2018		 */
2019		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2020#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2021		atomic_add_int(&stcb->asoc.refcnt, 1);
2022		SCTP_TCB_UNLOCK(stcb);
2023		SCTP_SOCKET_LOCK(so, 1);
2024		SCTP_TCB_LOCK(stcb);
2025		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2026		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2027			SCTP_SOCKET_UNLOCK(so, 1);
2028			return (NULL);
2029		}
2030#endif
2031		soisconnected(stcb->sctp_socket);
2032#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2033		SCTP_SOCKET_UNLOCK(so, 1);
2034#endif
2035	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2036	    (inp->sctp_socket->so_qlimit)) {
2037		/*
2038		 * We don't want to do anything with this one, since it is
2039		 * the listening guy.  The timer will get started for
2040		 * accepted connections in the caller.
2041		 */
2042		;
2043	}
2044	/* since we did not send a HB make sure we don't double things */
2045	if ((netp) && (*netp))
2046		(*netp)->hb_responded = 1;
2047
2048	if (stcb->asoc.sctp_autoclose_ticks &&
2049	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2050		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
2051	}
2052	/* calculate the RTT */
2053	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2054	if ((netp) && (*netp)) {
2055		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
2056		    &cookie->time_entered, sctp_align_unsafe_makecopy);
2057	}
2058	/* respond with a COOKIE-ACK */
2059	sctp_send_cookie_ack(stcb);
2060
2061	/*
2062	 * check the address lists for any ASCONFs that need to be sent
2063	 * AFTER the cookie-ack is sent
2064	 */
2065	sctp_check_address_list(stcb, m,
2066	    initack_offset + sizeof(struct sctp_init_ack_chunk),
2067	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
2068	    initack_src, cookie->local_scope, cookie->site_scope,
2069	    cookie->ipv4_scope, cookie->loopback_scope);
2070
2071
2072	return (stcb);
2073}
2074
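/*
 * Editor's note (not part of the original source): the offset
 * arithmetic used by the cookie processing above can be summarized as
 * follows.  Illustrative sketch only; the helper and struct names are
 * hypothetical.  It assumes the layout the code relies on: COOKIE-ECHO
 * header + state cookie, then the embedded INIT and INIT-ACK (each
 * stripped of the state-cookie parameter), then the HMAC signature.
 */
struct cookie_offsets {
	int init_offset;	/* start of the embedded INIT chunk */
	int initack_offset;	/* start of the embedded INIT-ACK chunk */
	int initack_limit;	/* end of the usable INIT-ACK parameters */
};

static struct cookie_offsets
compute_cookie_offsets(int cookie_echo_offset, int cookie_len,
    int init_chunk_len)
{
	struct cookie_offsets co;

	co.init_offset = cookie_echo_offset +
	    (int)sizeof(struct sctp_cookie_echo_chunk);
	/* chunks are padded to a 4-byte boundary, as SCTP_SIZE32() does */
	co.initack_offset = co.init_offset + ((init_chunk_len + 3) & ~3);
	/*
	 * The INIT-ACK's own chunk_length cannot be used for the limit
	 * because it still counts the state-cookie parameter that was
	 * stripped out, so the limit comes from the cookie length.
	 */
	co.initack_limit = cookie_echo_offset + cookie_len;
	return (co);
}
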
2075
2076/*
2077 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2078 * existing (non-NULL) TCB
2079 */
2080static struct mbuf *
2081sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2082    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2083    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2084    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2085    struct sctp_tcb **locked_tcb, uint32_t vrf_id)
2086{
2087	struct sctp_state_cookie *cookie;
2088	struct sockaddr_in6 sin6;
2089	struct sockaddr_in sin;
2090	struct sctp_tcb *l_stcb = *stcb;
2091	struct sctp_inpcb *l_inp;
2092	struct sockaddr *to;
2093	sctp_assoc_t sac_restart_id;
2094	struct sctp_pcb *ep;
2095	struct mbuf *m_sig;
2096	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2097	uint8_t *sig;
2098	uint8_t cookie_ok = 0;
2099	unsigned int size_of_pkt, sig_offset, cookie_offset;
2100	unsigned int cookie_len;
2101	struct timeval now;
2102	struct timeval time_expires;
2103	struct sockaddr_storage dest_store;
2104	struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
2105	struct ip *iph;
2106	int notification = 0;
2107	struct sctp_nets *netl;
2108	int had_a_existing_tcb = 0;
2109
2110	SCTPDBG(SCTP_DEBUG_INPUT2,
2111	    "sctp_handle_cookie: handling COOKIE-ECHO\n");
2112
2113	if (inp_p == NULL) {
2114		return (NULL);
2115	}
2116	/* First get the destination address setup too. */
2117	iph = mtod(m, struct ip *);
2118	if (iph->ip_v == IPVERSION) {
2119		/* its IPv4 */
2120		struct sockaddr_in *lsin;
2121
2122		lsin = (struct sockaddr_in *)(localep_sa);
2123		memset(lsin, 0, sizeof(*lsin));
2124		lsin->sin_family = AF_INET;
2125		lsin->sin_len = sizeof(*lsin);
2126		lsin->sin_port = sh->dest_port;
2127		lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
2128		size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
2129	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
2130		/* its IPv6 */
2131		struct ip6_hdr *ip6;
2132		struct sockaddr_in6 *lsin6;
2133
2134		lsin6 = (struct sockaddr_in6 *)(localep_sa);
2135		memset(lsin6, 0, sizeof(*lsin6));
2136		lsin6->sin6_family = AF_INET6;
2137		lsin6->sin6_len = sizeof(struct sockaddr_in6);
2138		ip6 = mtod(m, struct ip6_hdr *);
2139		lsin6->sin6_port = sh->dest_port;
2140		lsin6->sin6_addr = ip6->ip6_dst;
2141		size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
2142	} else {
2143		return (NULL);
2144	}
2145
2146	cookie = &cp->cookie;
2147	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2148	cookie_len = ntohs(cp->ch.chunk_length);
2149
2150	if ((cookie->peerport != sh->src_port) ||
2151	    (cookie->myport != sh->dest_port) ||
2152	    (cookie->my_vtag != sh->v_tag)) {
2153		/*
2154		 * invalid ports or bad tag.  Note that we always leave the
2155		 * v_tag in the header in network order and when we stored
2156		 * it in the my_vtag slot we also left it in network order.
2157		 * This maintains the match even though it may be in the
2158		 * opposite byte order of the machine :->
2159		 */
2160		return (NULL);
2161	}
2162	if (cookie_len > size_of_pkt ||
2163	    cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2164	    sizeof(struct sctp_init_chunk) +
2165	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2166		/* cookie too long!  or too small */
2167		return (NULL);
2168	}
2169	/*
2170	 * split off the signature into its own mbuf (since it should not be
2171	 * calculated in the sctp_hmac_m() call).
2172	 */
2173	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2174	if (sig_offset > size_of_pkt) {
2175		/* packet not correct size! */
2176		/* XXX this may already be accounted for earlier... */
2177		return (NULL);
2178	}
2179	m_sig = m_split(m, sig_offset, M_DONTWAIT);
2180	if (m_sig == NULL) {
2181		/* out of memory or ?? */
2182		return (NULL);
2183	}
2184	/*
2185	 * compute the signature/digest for the cookie
2186	 */
2187	ep = &(*inp_p)->sctp_ep;
2188	l_inp = *inp_p;
2189	if (l_stcb) {
2190		SCTP_TCB_UNLOCK(l_stcb);
2191	}
2192	SCTP_INP_RLOCK(l_inp);
2193	if (l_stcb) {
2194		SCTP_TCB_LOCK(l_stcb);
2195	}
2196	/* which cookie is it? */
2197	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2198	    (ep->current_secret_number != ep->last_secret_number)) {
2199		/* it's the old cookie */
2200		(void)sctp_hmac_m(SCTP_HMAC,
2201		    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2202		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2203	} else {
2204		/* it's the current cookie */
2205		(void)sctp_hmac_m(SCTP_HMAC,
2206		    (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
2207		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2208	}
2209	/* get the signature */
2210	SCTP_INP_RUNLOCK(l_inp);
2211	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2212	if (sig == NULL) {
2213		/* couldn't find signature */
2214		sctp_m_freem(m_sig);
2215		return (NULL);
2216	}
2217	/* compare the received digest with the computed digest */
2218	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2219		/* try the old cookie? */
2220		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2221		    (ep->current_secret_number != ep->last_secret_number)) {
2222			/* compute digest with old */
2223			(void)sctp_hmac_m(SCTP_HMAC,
2224			    (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
2225			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2226			/* compare */
2227			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2228				cookie_ok = 1;
2229		}
2230	} else {
2231		cookie_ok = 1;
2232	}
2233
2234	/*
2235	 * Now before we continue we must reconstruct our mbuf so that
2236	 * normal processing of any other chunks will work.
2237	 */
2238	{
2239		struct mbuf *m_at;
2240
2241		m_at = m;
2242		while (SCTP_BUF_NEXT(m_at) != NULL) {
2243			m_at = SCTP_BUF_NEXT(m_at);
2244		}
2245		SCTP_BUF_NEXT(m_at) = m_sig;
2246	}
2247
2248	if (cookie_ok == 0) {
2249		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2250		SCTPDBG(SCTP_DEBUG_INPUT2,
2251		    "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2252		    (uint32_t) offset, cookie_offset, sig_offset);
2253		return (NULL);
2254	}
2255	/*
2256	 * check the cookie timestamps to be sure it's not stale
2257	 */
2258	(void)SCTP_GETTIME_TIMEVAL(&now);
2259	/* Expire time is in Ticks, so we convert to seconds */
2260	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2261	time_expires.tv_usec = cookie->time_entered.tv_usec;
2262	if (timevalcmp(&now, &time_expires, >)) {
2263		/* cookie is stale! */
2264		struct mbuf *op_err;
2265		struct sctp_stale_cookie_msg *scm;
2266		uint32_t tim;
2267
2268		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
2269		    0, M_DONTWAIT, 1, MT_DATA);
2270		if (op_err == NULL) {
2271			/* FOOBAR */
2272			return (NULL);
2273		}
2274		/* pre-reserve some space */
2275		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
2276		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
2277		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
2278
2279		/* Set the len */
2280		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
2281		scm = mtod(op_err, struct sctp_stale_cookie_msg *);
2282		scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
2283		scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
2284		    (sizeof(uint32_t))));
2285		/* seconds to usec */
2286		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2287		/* add in usec */
2288		if (tim == 0)
2289			tim = now.tv_usec - cookie->time_entered.tv_usec;
2290		scm->time_usec = htonl(tim);
2291		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
2292		    vrf_id);
2293		return (NULL);
2294	}
2295	/*
2296	 * Now we must see with the lookup address if we have an existing
2297	 * asoc. This will only happen if we were in the COOKIE-WAIT state
2298	 * and an INIT collided with us and somewhere the peer sent the
2299	 * cookie on another address besides the single address our assoc
2300	 * had for him. In this case we will have one of the tie-tags set at
2301	 * least AND the address field in the cookie can be used to look it
2302	 * up.
2303	 */
2304	to = NULL;
2305	if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
2306		memset(&sin6, 0, sizeof(sin6));
2307		sin6.sin6_family = AF_INET6;
2308		sin6.sin6_len = sizeof(sin6);
2309		sin6.sin6_port = sh->src_port;
2310		sin6.sin6_scope_id = cookie->scope_id;
2311		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2312		    sizeof(sin6.sin6_addr.s6_addr));
2313		to = (struct sockaddr *)&sin6;
2314	} else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
2315		memset(&sin, 0, sizeof(sin));
2316		sin.sin_family = AF_INET;
2317		sin.sin_len = sizeof(sin);
2318		sin.sin_port = sh->src_port;
2319		sin.sin_addr.s_addr = cookie->address[0];
2320		to = (struct sockaddr *)&sin;
2321	} else {
2322		/* This should not happen */
2323		return (NULL);
2324	}
2325	if ((*stcb == NULL) && to) {
2326		/* Yep, lets check */
2327		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
2328		if (*stcb == NULL) {
2329			/*
2330			 * We should only have got back the same inp.  If we
2331			 * got back a different ep we have a problem: the
2332			 * original findep returned l_inp and now we have something else.
2333			 */
2334			if (l_inp != *inp_p) {
2335				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2336			}
2337		} else {
2338			if (*locked_tcb == NULL) {
2339				/*
2340				 * In this case we found the assoc only
2341				 * after we locked the create lock. This
2342				 * means we are in a colliding case and we
2343				 * must make sure that we unlock the tcb if
2344				 * its one of the cases where we throw away
2345				 * the incoming packets.
2346				 */
2347				*locked_tcb = *stcb;
2348
2349				/*
2350				 * We must also increment the inp ref count
2351				 * since the ref_count flag was set when we
2352				 * did not find the TCB; now we found it,
2353				 * which reduces the refcount, so we must
2354				 * raise it back up to balance it all :-)
2355				 */
2356				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2357				if ((*stcb)->sctp_ep != l_inp) {
2358					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2359					    (*stcb)->sctp_ep, l_inp);
2360				}
2361			}
2362		}
2363	}
2364	if (to == NULL)
2365		return (NULL);
2366
2367	cookie_len -= SCTP_SIGNATURE_SIZE;
2368	if (*stcb == NULL) {
2369		/* this is the "normal" case... get a new TCB */
2370		*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2371		    cookie_len, *inp_p, netp, to, &notification,
2372		    auth_skipped, auth_offset, auth_len, vrf_id);
2373	} else {
2374		/* this is abnormal... cookie-echo on existing TCB */
2375		had_a_existing_tcb = 1;
2376		*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2377		    cookie, cookie_len, *inp_p, *stcb, *netp, to,
2378		    &notification, &sac_restart_id, vrf_id);
2379	}
2380
2381	if (*stcb == NULL) {
2382		/* still no TCB... must be bad cookie-echo */
2383		return (NULL);
2384	}
2385	/*
2386	 * Ok, we built an association so confirm the address we sent the
2387	 * INIT-ACK to.
2388	 */
2389	netl = sctp_findnet(*stcb, to);
2390	/*
2391	 * This code should in theory NOT run, but handle it just in case.
2392	 */
2393	if (netl == NULL) {
2394		/* TSNH! Huh, why do I need to add this address here? */
2395		int ret;
2396
2397		ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2398		    SCTP_IN_COOKIE_PROC);
2399		netl = sctp_findnet(*stcb, to);
2400	}
2401	if (netl) {
2402		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2403			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2404			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2405			    netl);
2406			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2407			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2408		}
2409	}
2410	if (*stcb) {
2411		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2412		    *stcb, NULL);
2413	}
2414	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2415		if (!had_a_existing_tcb ||
2416		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2417			/*
2418			 * If we have a NEW cookie or the connect never
2419			 * reached the connected state during collision we
2420			 * must do the TCP accept thing.
2421			 */
2422			struct socket *so, *oso;
2423			struct sctp_inpcb *inp;
2424
2425			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2426				/*
2427				 * For a restart we will keep the same
2428				 * socket, no need to do anything. I THINK!!
2429				 */
2430				sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
2431				return (m);
2432			}
2433			oso = (*inp_p)->sctp_socket;
2434			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2435			SCTP_TCB_UNLOCK((*stcb));
2436			so = sonewconn(oso, 0
2437			    );
2438			SCTP_TCB_LOCK((*stcb));
2439			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2440
2441			if (so == NULL) {
2442				struct mbuf *op_err;
2443
2444#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2445				struct socket *pcb_so;
2446
2447#endif
2448				/* Too many sockets */
2449				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2450				op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2451				sctp_abort_association(*inp_p, NULL, m, iphlen,
2452				    sh, op_err, vrf_id);
2453#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2454				pcb_so = SCTP_INP_SO(*inp_p);
2455				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2456				SCTP_TCB_UNLOCK((*stcb));
2457				SCTP_SOCKET_LOCK(pcb_so, 1);
2458				SCTP_TCB_LOCK((*stcb));
2459				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2460#endif
2461				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2462#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2463				SCTP_SOCKET_UNLOCK(pcb_so, 1);
2464#endif
2465				return (NULL);
2466			}
2467			inp = (struct sctp_inpcb *)so->so_pcb;
2468			SCTP_INP_INCR_REF(inp);
2469			/*
2470			 * We add the unbound flag here so that if we get an
2471			 * soabort() before we get the move_pcb done, we
2472			 * will properly cleanup.
2473			 */
2474			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2475			    SCTP_PCB_FLAGS_CONNECTED |
2476			    SCTP_PCB_FLAGS_IN_TCPPOOL |
2477			    SCTP_PCB_FLAGS_UNBOUND |
2478			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2479			    SCTP_PCB_FLAGS_DONT_WAKE);
2480			inp->sctp_features = (*inp_p)->sctp_features;
2481			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2482			inp->sctp_socket = so;
2483			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2484			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2485			inp->sctp_context = (*inp_p)->sctp_context;
2486			inp->inp_starting_point_for_iterator = NULL;
2487			/*
2488			 * copy in the authentication parameters from the
2489			 * original endpoint
2490			 */
2491			if (inp->sctp_ep.local_hmacs)
2492				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2493			inp->sctp_ep.local_hmacs =
2494			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2495			if (inp->sctp_ep.local_auth_chunks)
2496				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2497			inp->sctp_ep.local_auth_chunks =
2498			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2499			(void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2500			    &inp->sctp_ep.shared_keys);
2501
2502			/*
2503			 * Now we must move it from one hash table to
2504			 * another and get the tcb in the right place.
2505			 */
2506			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2507
2508			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2509			SCTP_TCB_UNLOCK((*stcb));
2510
2511			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2512			SCTP_TCB_LOCK((*stcb));
2513			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2514
2515
2516			/*
2517			 * now we must check to see if we were aborted while
2518			 * the move was going on and the lock/unlock
2519			 * happened.
2520			 */
2521			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2522				/*
2523				 * yep it was, we leave the assoc attached
2524				 * to the socket since the sctp_inpcb_free()
2525				 * call will send an abort for us.
2526				 */
2527				SCTP_INP_DECR_REF(inp);
2528				return (NULL);
2529			}
2530			SCTP_INP_DECR_REF(inp);
2531			/* Switch over to the new guy */
2532			*inp_p = inp;
2533			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2534
2535			/*
2536			 * Pull it from the incomplete queue and wake the
2537			 * guy
2538			 */
2539#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2540			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2541			SCTP_TCB_UNLOCK((*stcb));
2542			SCTP_SOCKET_LOCK(so, 1);
2543#endif
2544			soisconnected(so);
2545#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2546			SCTP_TCB_LOCK((*stcb));
2547			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2548			SCTP_SOCKET_UNLOCK(so, 1);
2549#endif
2550			return (m);
2551		}
2552	}
2553	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2554		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2555	}
2556	return (m);
2557}
2558
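/*
 * Editor's note (not part of the original source): the stale-cookie
 * test above boils down to the timeval arithmetic sketched here.  The
 * helper name is hypothetical; it assumes TICKS_TO_SEC() is a plain
 * ticks/hz conversion and mirrors the quirk above where a sub-second
 * staleness falls back to the raw microsecond difference.
 */
static int
cookie_is_stale(const struct timeval *now,
    const struct timeval *time_entered, uint32_t cookie_life_ticks,
    uint32_t *staleness_usec)
{
	struct timeval expires;

	expires.tv_sec = time_entered->tv_sec +
	    (time_t)(cookie_life_ticks / hz);	/* cookie life: ticks -> sec */
	expires.tv_usec = time_entered->tv_usec;
	if (timevalcmp(now, &expires, >)) {
		/* how far past expiry, reported in the STALE COOKIE cause */
		*staleness_usec = (uint32_t)
		    ((now->tv_sec - expires.tv_sec) * 1000000);
		if (*staleness_usec == 0)
			*staleness_usec = (uint32_t)
			    (now->tv_usec - time_entered->tv_usec);
		return (1);
	}
	return (0);
}
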
2559static void
2560sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2561    struct sctp_tcb *stcb, struct sctp_nets *net)
2562{
2563	/* cp must not be used, others call this without a c-ack :-) */
2564	struct sctp_association *asoc;
2565
2566	SCTPDBG(SCTP_DEBUG_INPUT2,
2567	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2568	if (stcb == NULL)
2569		return;
2570
2571	asoc = &stcb->asoc;
2572
2573	sctp_stop_all_cookie_timers(stcb);
2574	/* process according to association state */
2575	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2576		/* state change only needed when I am in right state */
2577		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2578		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2579		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2580			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2581			    stcb->sctp_ep, stcb, asoc->primary_destination);
2582
2583		}
2584		/* update RTO */
2585		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2586		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2587		if (asoc->overall_error_count == 0) {
2588			net->RTO = sctp_calculate_rto(stcb, asoc, net,
2589			    &asoc->time_entered, sctp_align_safe_nocopy);
2590		}
2591		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2592		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2593		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2594		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2595#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2596			struct socket *so;
2597
2598#endif
2599			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2600#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2601			so = SCTP_INP_SO(stcb->sctp_ep);
2602			atomic_add_int(&stcb->asoc.refcnt, 1);
2603			SCTP_TCB_UNLOCK(stcb);
2604			SCTP_SOCKET_LOCK(so, 1);
2605			SCTP_TCB_LOCK(stcb);
2606			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2607			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2608				SCTP_SOCKET_UNLOCK(so, 1);
2609				return;
2610			}
2611#endif
2612			soisconnected(stcb->sctp_socket);
2613#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2614			SCTP_SOCKET_UNLOCK(so, 1);
2615#endif
2616		}
2617		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2618		    stcb, net);
2619		/*
2620		 * since we did not send a HB make sure we don't double
2621		 * things
2622		 */
2623		net->hb_responded = 1;
2624
2625		if (stcb->asoc.sctp_autoclose_ticks &&
2626		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2627			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2628			    stcb->sctp_ep, stcb, NULL);
2629		}
2630		/*
2631		 * send ASCONF if parameters are pending and ASCONFs are
2632		 * allowed (eg. addresses changed when init/cookie echo were
2633		 * in flight)
2634		 */
2635		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2636		    (stcb->asoc.peer_supports_asconf) &&
2637		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2638#ifdef SCTP_TIMER_BASED_ASCONF
2639			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2640			    stcb->sctp_ep, stcb,
2641			    stcb->asoc.primary_destination);
2642#else
2643			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
2644			    SCTP_ADDR_NOT_LOCKED);
2645#endif
2646		}
2647	}
2648	/* Toss the cookie if I can */
2649	sctp_toss_old_cookies(stcb, asoc);
2650	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2651		/* Restart the timer if we have pending data */
2652		struct sctp_tmit_chunk *chk;
2653
2654		chk = TAILQ_FIRST(&asoc->sent_queue);
2655		if (chk) {
2656			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2657			    stcb, chk->whoTo);
2658		}
2659	}
2660}
2661
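/*
 * Editor's note (not part of the original source): sctp_calculate_rto(),
 * used above when the COOKIE-ACK completes a measured round trip,
 * follows the standard RFC 4960, Section 6.3.1 rules.  The sketch below
 * is a generic illustration of those rules only; the real code works in
 * integer ticks, not floating point, and its internals are not shown
 * here.
 */
struct rto_sketch {
	int	have_measurement;	/* any RTT sample seen yet? */
	double	srtt;			/* smoothed RTT, seconds */
	double	rttvar;			/* RTT variation, seconds */
	double	rto;			/* resulting RTO, seconds */
};

static void
rto_sketch_update(struct rto_sketch *rs, double rtt_sample)
{
	const double alpha = 0.125, beta = 0.25;	/* RTO.Alpha, RTO.Beta */
	const double rto_min = 1.0, rto_max = 60.0;	/* RTO.Min, RTO.Max */
	double delta;

	if (!rs->have_measurement) {
		rs->srtt = rtt_sample;
		rs->rttvar = rtt_sample / 2.0;
		rs->have_measurement = 1;
	} else {
		delta = rs->srtt - rtt_sample;
		if (delta < 0.0)
			delta = -delta;
		rs->rttvar = (1.0 - beta) * rs->rttvar + beta * delta;
		rs->srtt = (1.0 - alpha) * rs->srtt + alpha * rtt_sample;
	}
	rs->rto = rs->srtt + 4.0 * rs->rttvar;
	if (rs->rto < rto_min)
		rs->rto = rto_min;
	if (rs->rto > rto_max)
		rs->rto = rto_max;
}
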
2662static void
2663sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2664    struct sctp_tcb *stcb)
2665{
2666	struct sctp_nets *net;
2667	struct sctp_tmit_chunk *lchk;
2668	uint32_t tsn;
2669
2670	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2671		return;
2672	}
2673	SCTP_STAT_INCR(sctps_recvecne);
2674	tsn = ntohl(cp->tsn);
2675	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
2676	/* Also we make sure we disable the nonce_wait */
2677	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2678	if (lchk == NULL) {
2679		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2680	} else {
2681		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2682	}
2683	stcb->asoc.nonce_wait_for_ecne = 0;
2684	stcb->asoc.nonce_sum_check = 0;
2685
2686	/* Find where it was sent, if possible */
2687	net = NULL;
2688	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2689	while (lchk) {
2690		if (lchk->rec.data.TSN_seq == tsn) {
2691			net = lchk->whoTo;
2692			break;
2693		}
2694		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2695			break;
2696		lchk = TAILQ_NEXT(lchk, sctp_next);
2697	}
2698	if (net == NULL)
2699		/* default is we use the primary */
2700		net = stcb->asoc.primary_destination;
2701
2702	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2703		/*
2704		 * JRS - Use the congestion control given in the pluggable
2705		 * CC module
2706		 */
2707		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
2708		/*
2709		 * we reduce once every RTT. So we will only lower cwnd at
2710		 * the next sending seq i.e. the resync_tsn.
2711		 */
2712		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2713	}
2714	/*
2715	 * We always send a CWR this way: if our previous one was lost, our
2716	 * peer will get an update; and if it is not yet time to reduce
2717	 * again, the peer still gets the CWR.
2718	 */
2719	sctp_send_cwr(stcb, net, tsn);
2720}
2721
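/*
 * Editor's note (not part of the original source): the once-per-window
 * cwnd reduction above is gated by a wrap-aware TSN comparison
 * (compare_with_wrap()).  The helper below is a minimal illustration of
 * that kind of serial-number comparison and is not the original macro;
 * e.g. tsn_newer_than(0x00000005, 0xfffffff0) is true across the wrap.
 */
static int
tsn_newer_than(uint32_t a, uint32_t b)
{
	/* true when 'a' is logically after 'b', allowing 32-bit wrap */
	return ((a != b) && ((uint32_t)(a - b) < 0x80000000U));
}
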
2722static void
2723sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2724{
2725	/*
2726	 * Here we get a CWR from the peer. We must look in the outqueue and
2727	 * make sure that we have a covered ECNE in the control chunk part.
2728	 * If so remove it.
2729	 */
2730	struct sctp_tmit_chunk *chk;
2731	struct sctp_ecne_chunk *ecne;
2732
2733	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2734		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2735			continue;
2736		}
2737		/*
2738		 * Look for and remove if it is the right TSN. Since there
2739		 * is only ONE ECNE on the control queue at any one time we
2740		 * don't need to worry about more than one!
2741		 */
2742		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2743		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2744		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
2745			/* this covers this ECNE, we can remove it */
2746			stcb->asoc.ecn_echo_cnt_onq--;
2747			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2748			    sctp_next);
2749			if (chk->data) {
2750				sctp_m_freem(chk->data);
2751				chk->data = NULL;
2752			}
2753			stcb->asoc.ctrl_queue_cnt--;
2754			sctp_free_a_chunk(stcb, chk);
2755			break;
2756		}
2757	}
2758}
2759
2760static void
2761sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2762    struct sctp_tcb *stcb, struct sctp_nets *net)
2763{
2764	struct sctp_association *asoc;
2765
2766#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2767	struct socket *so;
2768
2769#endif
2770
2771	SCTPDBG(SCTP_DEBUG_INPUT2,
2772	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2773	if (stcb == NULL)
2774		return;
2775
2776	asoc = &stcb->asoc;
2777	/* process according to association state */
2778	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2779		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
2780		SCTPDBG(SCTP_DEBUG_INPUT2,
2781		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
2782		SCTP_TCB_UNLOCK(stcb);
2783		return;
2784	}
2785	/* notify upper layer protocol */
2786	if (stcb->sctp_socket) {
2787		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2788		/* are the queues empty? they should be */
2789		if (!TAILQ_EMPTY(&asoc->send_queue) ||
2790		    !TAILQ_EMPTY(&asoc->sent_queue) ||
2791		    !TAILQ_EMPTY(&asoc->out_wheel)) {
2792			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
2793		}
2794	}
2795	/* stop the timer */
2796	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2797	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
2798	/* free the TCB */
2799	SCTPDBG(SCTP_DEBUG_INPUT2,
2800	    "sctp_handle_shutdown_complete: calls free-asoc\n");
2801#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2802	so = SCTP_INP_SO(stcb->sctp_ep);
2803	atomic_add_int(&stcb->asoc.refcnt, 1);
2804	SCTP_TCB_UNLOCK(stcb);
2805	SCTP_SOCKET_LOCK(so, 1);
2806	SCTP_TCB_LOCK(stcb);
2807	atomic_subtract_int(&stcb->asoc.refcnt, 1);
2808#endif
2809	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2810#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2811	SCTP_SOCKET_UNLOCK(so, 1);
2812#endif
2813	return;
2814}
2815
2816static int
2817process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2818    struct sctp_nets *net, uint8_t flg)
2819{
2820	switch (desc->chunk_type) {
2821		case SCTP_DATA:
2822		/* find the TSN to resend (if possible) */
2823		{
2824			uint32_t tsn;
2825			struct sctp_tmit_chunk *tp1;
2826
2827			tsn = ntohl(desc->tsn_ifany);
2828			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2829			while (tp1) {
2830				if (tp1->rec.data.TSN_seq == tsn) {
2831					/* found it */
2832					break;
2833				}
2834				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2835				    MAX_TSN)) {
2836					/* not found */
2837					tp1 = NULL;
2838					break;
2839				}
2840				tp1 = TAILQ_NEXT(tp1, sctp_next);
2841			}
2842			if (tp1 == NULL) {
2843				/*
2844				 * Do it the other way, i.e. without paying
2845				 * attention to queue sequence order.
2846				 */
2847				SCTP_STAT_INCR(sctps_pdrpdnfnd);
2848				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2849				while (tp1) {
2850					if (tp1->rec.data.TSN_seq == tsn) {
2851						/* found it */
2852						break;
2853					}
2854					tp1 = TAILQ_NEXT(tp1, sctp_next);
2855				}
2856			}
2857			if (tp1 == NULL) {
2858				SCTP_STAT_INCR(sctps_pdrptsnnf);
2859			}
2860			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
2861				uint8_t *ddp;
2862
2863				if ((stcb->asoc.peers_rwnd == 0) &&
2864				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2865					SCTP_STAT_INCR(sctps_pdrpdiwnp);
2866					return (0);
2867				}
2868				if (stcb->asoc.peers_rwnd == 0 &&
2869				    (flg & SCTP_FROM_MIDDLE_BOX)) {
2870					SCTP_STAT_INCR(sctps_pdrpdizrw);
2871					return (0);
2872				}
2873				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2874				    sizeof(struct sctp_data_chunk));
2875				{
2876					unsigned int iii;
2877
2878					for (iii = 0; iii < sizeof(desc->data_bytes);
2879					    iii++) {
2880						if (ddp[iii] != desc->data_bytes[iii]) {
2881							SCTP_STAT_INCR(sctps_pdrpbadd);
2882							return (-1);
2883						}
2884					}
2885				}
2886				/*
2887				 * We zero out the nonce so a resync is not
2888				 * needed
2889				 */
2890				tp1->rec.data.ect_nonce = 0;
2891
2892				if (tp1->do_rtt) {
2893					/*
2894					 * this guy had an RTO calculation
2895					 * pending on it, cancel it
2896					 */
2897					tp1->do_rtt = 0;
2898				}
2899				SCTP_STAT_INCR(sctps_pdrpmark);
2900				if (tp1->sent != SCTP_DATAGRAM_RESEND)
2901					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2902				tp1->sent = SCTP_DATAGRAM_RESEND;
2903				/*
2904				 * mark it as if we were doing a FR, since
2905				 * we will be getting gap ack reports behind
2906				 * the info from the router.
2907				 */
2908				tp1->rec.data.doing_fast_retransmit = 1;
2909				/*
2910				 * mark the tsn with what sequences can
2911				 * cause a new FR.
2912				 */
2913				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2914					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2915				} else {
2916					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2917				}
2918
2919				/* restart the timer */
2920				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2921				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2922				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2923				    stcb, tp1->whoTo);
2924
2925				/* fix counts and things */
2926				if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
2927					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2928					    tp1->whoTo->flight_size,
2929					    tp1->book_size,
2930					    (uintptr_t) stcb,
2931					    tp1->rec.data.TSN_seq);
2932				}
2933				sctp_flight_size_decrease(tp1);
2934				sctp_total_flight_decrease(stcb, tp1);
2935			} {
2936				/* audit code */
2937				unsigned int audit;
2938
2939				audit = 0;
2940				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2941					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2942						audit++;
2943				}
2944				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2945				    sctp_next) {
2946					if (tp1->sent == SCTP_DATAGRAM_RESEND)
2947						audit++;
2948				}
2949				if (audit != stcb->asoc.sent_queue_retran_cnt) {
2950					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
2951					    audit, stcb->asoc.sent_queue_retran_cnt);
2952#ifndef SCTP_AUDITING_ENABLED
2953					stcb->asoc.sent_queue_retran_cnt = audit;
2954#endif
2955				}
2956			}
2957		}
2958		break;
2959	case SCTP_ASCONF:
2960		{
2961			struct sctp_tmit_chunk *asconf;
2962
2963			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
2964			    sctp_next) {
2965				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
2966					break;
2967				}
2968			}
2969			if (asconf) {
2970				if (asconf->sent != SCTP_DATAGRAM_RESEND)
2971					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2972				asconf->sent = SCTP_DATAGRAM_RESEND;
2973				asconf->snd_count--;
2974			}
2975		}
2976		break;
2977	case SCTP_INITIATION:
2978		/* resend the INIT */
2979		stcb->asoc.dropped_special_cnt++;
2980		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
2981			/*
2982			 * If we can get it in, in a few attempts we do
2983			 * this, otherwise we let the timer fire.
2984			 */
2985			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
2986			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
2987			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
2988		}
2989		break;
2990	case SCTP_SELECTIVE_ACK:
2991		/* resend the sack */
2992		sctp_send_sack(stcb);
2993		break;
2994	case SCTP_HEARTBEAT_REQUEST:
2995		/* resend a demand HB */
2996		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
2997			/*
2998			 * Only retransmit if we KNOW we won't destroy the
2999			 * tcb
3000			 */
3001			(void)sctp_send_hb(stcb, 1, net);
3002		}
3003		break;
3004	case SCTP_SHUTDOWN:
3005		sctp_send_shutdown(stcb, net);
3006		break;
3007	case SCTP_SHUTDOWN_ACK:
3008		sctp_send_shutdown_ack(stcb, net);
3009		break;
3010	case SCTP_COOKIE_ECHO:
3011		{
3012			struct sctp_tmit_chunk *cookie;
3013
3014			cookie = NULL;
3015			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
3016			    sctp_next) {
3017				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
3018					break;
3019				}
3020			}
3021			if (cookie) {
3022				if (cookie->sent != SCTP_DATAGRAM_RESEND)
3023					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3024				cookie->sent = SCTP_DATAGRAM_RESEND;
3025				sctp_stop_all_cookie_timers(stcb);
3026			}
3027		}
3028		break;
3029	case SCTP_COOKIE_ACK:
3030		sctp_send_cookie_ack(stcb);
3031		break;
3032	case SCTP_ASCONF_ACK:
3033		/* resend last asconf ack */
3034		sctp_send_asconf_ack(stcb);
3035		break;
3036	case SCTP_FORWARD_CUM_TSN:
3037		send_forward_tsn(stcb, &stcb->asoc);
3038		break;
3039		/* can't do anything with these */
3040	case SCTP_PACKET_DROPPED:
3041	case SCTP_INITIATION_ACK:	/* this should not happen */
3042	case SCTP_HEARTBEAT_ACK:
3043	case SCTP_ABORT_ASSOCIATION:
3044	case SCTP_OPERATION_ERROR:
3045	case SCTP_SHUTDOWN_COMPLETE:
3046	case SCTP_ECN_ECHO:
3047	case SCTP_ECN_CWR:
3048	default:
3049		break;
3050	}
3051	return (0);
3052}
3053
3054void
3055sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3056{
3057	int i;
3058	uint16_t temp;
3059
3060	/*
3061	 * We set things to 0xffff since this is the last delivered sequence,
3062	 * and the peer will be sending in 0 after the reset.
3063	 */
3064
3065	if (number_entries) {
3066		for (i = 0; i < number_entries; i++) {
3067			temp = ntohs(list[i]);
3068			if (temp >= stcb->asoc.streamincnt) {
3069				continue;
3070			}
3071			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
3072		}
3073	} else {
3074		list = NULL;
3075		for (i = 0; i < stcb->asoc.streamincnt; i++) {
3076			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
3077		}
3078	}
3079	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3080}
3081
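/*
 * Editor's note (not part of the original source): setting
 * last_sequence_delivered to 0xffff above works because stream sequence
 * numbers are 16 bits and wrap, so the peer's first SSN of 0 after the
 * reset is seen as the in-order successor.  Minimal illustration with a
 * hypothetical helper:
 */
static int
ssn_is_next_in_order(uint16_t last_delivered, uint16_t arriving)
{
	/* 16-bit arithmetic wraps, so 0xffff + 1 == 0 */
	return (arriving == (uint16_t)(last_delivered + 1));
}
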
3082static void
3083sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
3084{
3085	int i;
3086
3087	if (number_entries == 0) {
3088		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3089			stcb->asoc.strmout[i].next_sequence_sent = 0;
3090		}
3091	} else if (number_entries) {
3092		for (i = 0; i < number_entries; i++) {
3093			uint16_t temp;
3094
3095			temp = ntohs(list[i]);
3096			if (temp >= stcb->asoc.streamoutcnt) {
3097				/* no such stream */
3098				continue;
3099			}
3100			stcb->asoc.strmout[temp].next_sequence_sent = 0;
3101		}
3102	}
3103	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3104}
3105
3106
3107struct sctp_stream_reset_out_request *
3108sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3109{
3110	struct sctp_association *asoc;
3111	struct sctp_stream_reset_out_req *req;
3112	struct sctp_stream_reset_out_request *r;
3113	struct sctp_tmit_chunk *chk;
3114	int len, clen;
3115
3116	asoc = &stcb->asoc;
3117	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3118		asoc->stream_reset_outstanding = 0;
3119		return (NULL);
3120	}
3121	if (stcb->asoc.str_reset == NULL) {
3122		asoc->stream_reset_outstanding = 0;
3123		return (NULL);
3124	}
3125	chk = stcb->asoc.str_reset;
3126	if (chk->data == NULL) {
3127		return (NULL);
3128	}
3129	if (bchk) {
3130		/* he wants a copy of the chk pointer */
3131		*bchk = chk;
3132	}
3133	clen = chk->send_size;
3134	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
3135	r = &req->sr_req;
3136	if (ntohl(r->request_seq) == seq) {
3137		/* found it */
3138		return (r);
3139	}
3140	len = SCTP_SIZE32(ntohs(r->ph.param_length));
3141	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3142		/* move to the next one, there can only be a max of two */
3143		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
3144		if (ntohl(r->request_seq) == seq) {
3145			return (r);
3146		}
3147	}
3148	/* that seq is not here */
3149	return (NULL);
3150}
3151
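/*
 * Editor's note (not part of the original source): the lookup above
 * walks at most two TLV-encoded reset requests inside one stream-reset
 * chunk, stepping by the 4-byte padded parameter length.  Generic
 * TLV-walk sketch with a hypothetical helper name:
 */
static struct sctp_paramhdr *
next_padded_param(struct sctp_paramhdr *ph, int *remaining)
{
	int plen = ((int)ntohs(ph->param_length) + 3) & ~3;	/* SCTP_SIZE32 */

	if (plen < (int)sizeof(struct sctp_paramhdr) || plen > *remaining)
		return (NULL);	/* malformed length or past the end */
	*remaining -= plen;
	if (*remaining < (int)sizeof(struct sctp_paramhdr))
		return (NULL);	/* no complete parameter header follows */
	return ((struct sctp_paramhdr *)((caddr_t)ph + plen));
}
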
3152static void
3153sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3154{
3155	struct sctp_association *asoc;
3156	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3157
3158	if (stcb->asoc.str_reset == NULL) {
3159		return;
3160	}
3161	asoc = &stcb->asoc;
3162
3163	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3164	TAILQ_REMOVE(&asoc->control_send_queue,
3165	    chk,
3166	    sctp_next);
3167	if (chk->data) {
3168		sctp_m_freem(chk->data);
3169		chk->data = NULL;
3170	}
3171	asoc->ctrl_queue_cnt--;
3172	sctp_free_a_chunk(stcb, chk);
3173	/* sa_ignore NO_NULL_CHK */
3174	stcb->asoc.str_reset = NULL;
3175}
3176
3177
3178static int
3179sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3180    uint32_t seq, uint32_t action,
3181    struct sctp_stream_reset_response *respin)
3182{
3183	uint16_t type;
3184	int lparm_len;
3185	struct sctp_association *asoc = &stcb->asoc;
3186	struct sctp_tmit_chunk *chk;
3187	struct sctp_stream_reset_out_request *srparam;
3188	int number_entries;
3189
3190	if (asoc->stream_reset_outstanding == 0) {
3191		/* duplicate */
3192		return (0);
3193	}
3194	if (seq == stcb->asoc.str_reset_seq_out) {
3195		srparam = sctp_find_stream_reset(stcb, seq, &chk);
3196		if (srparam) {
3197			stcb->asoc.str_reset_seq_out++;
3198			type = ntohs(srparam->ph.param_type);
3199			lparm_len = ntohs(srparam->ph.param_length);
3200			if (type == SCTP_STR_RESET_OUT_REQUEST) {
3201				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3202				asoc->stream_reset_out_is_outstanding = 0;
3203				if (asoc->stream_reset_outstanding)
3204					asoc->stream_reset_outstanding--;
3205				if (action == SCTP_STREAM_RESET_PERFORMED) {
3206					/* do it */
3207					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
3208				} else {
3209					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3210				}
3211			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
3212				/* Answered my request */
3213				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3214				if (asoc->stream_reset_outstanding)
3215					asoc->stream_reset_outstanding--;
3216				if (action != SCTP_STREAM_RESET_PERFORMED) {
3217					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
3218				}
3219			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
3220				/**
3221				 * a) Adopt the new incoming TSN.
3222				 * b) Reset the mapping array.
3223				 * c) Adopt the new outgoing TSN.
3224				 */
3225				struct sctp_stream_reset_response_tsn *resp;
3226				struct sctp_forward_tsn_chunk fwdtsn;
3227				int abort_flag = 0;
3228
3229				if (respin == NULL) {
3230					/* huh ? */
3231					return (0);
3232				}
3233				if (action == SCTP_STREAM_RESET_PERFORMED) {
3234					resp = (struct sctp_stream_reset_response_tsn *)respin;
3235					asoc->stream_reset_outstanding--;
3236					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3237					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3238					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
3239					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3240					if (abort_flag) {
3241						return (1);
3242					}
3243					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
3244					stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3245					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
3246					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3247					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
3248					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
3249
3250					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3251					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3252
3253				}
3254			}
3255			/* get rid of the request and get the request flags */
3256			if (asoc->stream_reset_outstanding == 0) {
3257				sctp_clean_up_stream_reset(stcb);
3258			}
3259		}
3260	}
3261	return (0);
3262}
3263
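/*
 * The peer asks us to reset some (or all) of our outgoing streams.  If the
 * request sequence number is current and none of our own resets is already
 * outstanding, queue a matching outgoing reset request; otherwise echo the
 * previous result, or deny/defer as appropriate.
 */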
3264static void
3265sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
3266    struct sctp_tmit_chunk *chk,
3267    struct sctp_stream_reset_in_request *req, int trunc)
3268{
3269	uint32_t seq;
3270	int len, i;
3271	int number_entries;
3272	uint16_t temp;
3273
3274	/*
3275	 * The peer wants me to send a stream reset for my outgoing streams,
3276	 * provided its request sequence number (seq_in) is the expected one.
3277	 */
3278	struct sctp_association *asoc = &stcb->asoc;
3279
3280	seq = ntohl(req->request_seq);
3281	if (asoc->str_reset_seq_in == seq) {
3282		if (trunc) {
3283			/* Can't do it, since they exceeded our buffer size  */
3284			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3285			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3286			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3287		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
3288			len = ntohs(req->ph.param_length);
3289			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
3290			for (i = 0; i < number_entries; i++) {
3291				temp = ntohs(req->list_of_streams[i]);
3292				req->list_of_streams[i] = temp;
3293			}
3294			/* move the reset action back one */
3295			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3296			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3297			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
3298			    asoc->str_reset_seq_out,
3299			    seq, (asoc->sending_seq - 1));
3300			asoc->stream_reset_out_is_outstanding = 1;
3301			asoc->str_reset = chk;
3302			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
3303			stcb->asoc.stream_reset_outstanding++;
3304		} else {
3305			/* Can't do it, since we have sent one out */
3306			asoc->last_reset_action[1] = asoc->last_reset_action[0];
3307			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
3308			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3309		}
3310		asoc->str_reset_seq_in++;
3311	} else if (asoc->str_reset_seq_in - 1 == seq) {
3312		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3313	} else if (asoc->str_reset_seq_in - 2 == seq) {
3314		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3315	} else {
3316		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3317	}
3318}
3319
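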
3320static int
3321sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
3322    struct sctp_tmit_chunk *chk,
3323    struct sctp_stream_reset_tsn_request *req)
3324{
3325	/* reset all in and out and update the tsn */
3326	/*
3327	 * Reset all in and out streams and update the TSNs:
3328	 * A) Reset my stream sequence numbers, both in and out.
3329	 * B) Select a receive-next TSN, set cum-ack to it, and process it as a FWD-TSN.
3330	 * C) Report my next sending sequence number in the response.
3331	 */
3332	struct sctp_association *asoc = &stcb->asoc;
3333	int abort_flag = 0;
3334	uint32_t seq;
3335
3336	seq = ntohl(req->request_seq);
3337	if (asoc->str_reset_seq_in == seq) {
3338		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
3339		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
3340		fwdtsn.ch.chunk_flags = 0;
3341		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
3342		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
3343		if (abort_flag) {
3344			return (1);
3345		}
3346		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
3347		stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
3348		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
3349		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
3350		atomic_add_int(&stcb->asoc.sending_seq, 1);
3351		/* save off historical data for retrans */
3352		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
3353		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
3354		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
3355		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
3356
3357		sctp_add_stream_reset_result_tsn(chk,
3358		    ntohl(req->request_seq),
3359		    SCTP_STREAM_RESET_PERFORMED,
3360		    stcb->asoc.sending_seq,
3361		    stcb->asoc.mapping_array_base_tsn);
3362		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
3363		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
3364		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
3365		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3366
3367		asoc->str_reset_seq_in++;
3368	} else if (asoc->str_reset_seq_in - 1 == seq) {
3369		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
3370		    stcb->asoc.last_sending_seq[0],
3371		    stcb->asoc.last_base_tsnsent[0]
3372		    );
3373	} else if (asoc->str_reset_seq_in - 2 == seq) {
3374		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
3375		    stcb->asoc.last_sending_seq[1],
3376		    stcb->asoc.last_base_tsnsent[1]
3377		    );
3378	} else {
3379		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3380	}
3381	return (0);
3382}
3383
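/*
 * The peer is resetting its outgoing (our incoming) streams.  Either perform
 * the reset right away if the announced TSN has already been reached, or
 * queue the request on resetHead until the missing TSNs arrive.
 */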
3384static void
3385sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
3386    struct sctp_tmit_chunk *chk,
3387    struct sctp_stream_reset_out_request *req, int trunc)
3388{
3389	uint32_t seq, tsn;
3390	int number_entries, len;
3391	struct sctp_association *asoc = &stcb->asoc;
3392
3393	seq = ntohl(req->request_seq);
3394
3395	/* now if it's not a duplicate we process it */
3396	if (asoc->str_reset_seq_in == seq) {
3397		len = ntohs(req->ph.param_length);
3398		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
3399		/*
3400		 * The sender is resetting its outgoing streams; handle the list:
3401		 * a) verify whether we can do the reset now; if so, no problem.
3402		 * b) If we can't do the reset yet, we must copy the request,
3403		 * c) queue it, and set up the data-in processor to trigger it
3404		 * when needed and dequeue all the queued data.
3405		 */
3406		tsn = ntohl(req->send_reset_at_tsn);
3407
3408		/* move the reset action back one */
3409		asoc->last_reset_action[1] = asoc->last_reset_action[0];
3410		if (trunc) {
3411			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3412			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3413		} else if ((tsn == asoc->cumulative_tsn) ||
3414		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3415			/* we can do it now */
3416			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3417			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3418			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3419		} else {
3420			/*
3421			 * we must queue it up and thus wait for the TSN's
3422			 * to arrive that are at or before tsn
3423			 */
3424			struct sctp_stream_reset_list *liste;
3425			int siz;
3426
3427			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3428			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3429			    siz, SCTP_M_STRESET);
3430			if (liste == NULL) {
3431				/* gak out of memory */
3432				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3433				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3434				return;
3435			}
3436			liste->tsn = tsn;
3437			liste->number_entries = number_entries;
3438			memcpy(&liste->req, req,
3439			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3440			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3441			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3442			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3443		}
3444		asoc->str_reset_seq_in++;
3445	} else if ((asoc->str_reset_seq_in - 1) == seq) {
3446		/*
3447		 * one seq back, just echo back last action since my
3448		 * response was lost.
3449		 */
3450		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3451	} else if ((asoc->str_reset_seq_in - 2) == seq) {
3452		/*
3453		 * two seq back, just echo back last action since my
3454		 * response was lost.
3455		 */
3456		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3457	} else {
3458		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3459	}
3460}
3461
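/*
 * Top-level handler for a STREAM-RESET chunk: walk every parameter it
 * carries, dispatch to the request/response handlers above, and queue a
 * single response chunk on the control send queue.  If only responses were
 * present there is nothing to send back and the chunk is freed.
 */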
3462#ifdef __GNUC__
3463__attribute__((noinline))
3464#endif
3465	static int
3466	    sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
3467        struct sctp_stream_reset_out_req *sr_req)
3468{
3469	int chk_length, param_len, ptype;
3470	struct sctp_paramhdr pstore;
3471	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
3472
3473	uint32_t seq;
3474	int num_req = 0;
3475	int trunc = 0;
3476	struct sctp_tmit_chunk *chk;
3477	struct sctp_chunkhdr *ch;
3478	struct sctp_paramhdr *ph;
3479	int ret_code = 0;
3480	int num_param = 0;
3481
3482	/* now it may be a reset or a reset-response */
3483	chk_length = ntohs(sr_req->ch.chunk_length);
3484
3485	/* setup for adding the response */
3486	sctp_alloc_a_chunk(stcb, chk);
3487	if (chk == NULL) {
3488		return (ret_code);
3489	}
3490	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3491	chk->rec.chunk_id.can_take_data = 0;
3492	chk->asoc = &stcb->asoc;
3493	chk->no_fr_allowed = 0;
3494	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3495	chk->book_size_scale = 0;
3496	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3497	if (chk->data == NULL) {
3498strres_nochunk:
3499		if (chk->data) {
3500			sctp_m_freem(chk->data);
3501			chk->data = NULL;
3502		}
3503		sctp_free_a_chunk(stcb, chk);
3504		return (ret_code);
3505	}
3506	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3507
3508	/* setup chunk parameters */
3509	chk->sent = SCTP_DATAGRAM_UNSENT;
3510	chk->snd_count = 0;
3511	chk->whoTo = stcb->asoc.primary_destination;
3512	atomic_add_int(&chk->whoTo->ref_count, 1);
3513
3514	ch = mtod(chk->data, struct sctp_chunkhdr *);
3515	ch->chunk_type = SCTP_STREAM_RESET;
3516	ch->chunk_flags = 0;
3517	ch->chunk_length = htons(chk->send_size);
3518	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3519	offset += sizeof(struct sctp_chunkhdr);
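	/*
	 * Walk each stream-reset parameter in the chunk; the request handlers
	 * above append their results to the response chunk set up here.
	 */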
3520	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3521		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
3522		if (ph == NULL)
3523			break;
3524		param_len = ntohs(ph->param_length);
3525		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3526			/* bad param */
3527			break;
3528		}
3529		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
3530		    (uint8_t *) & cstore);
3531		ptype = ntohs(ph->param_type);
3532		num_param++;
3533		if (param_len > (int)sizeof(cstore)) {
3534			trunc = 1;
3535		} else {
3536			trunc = 0;
3537		}
3538
3539		if (num_param > SCTP_MAX_RESET_PARAMS) {
3540			/* we hit the max number of parameters already, sorry.. */
3541			break;
3542		}
3543		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3544			struct sctp_stream_reset_out_request *req_out;
3545
3546			req_out = (struct sctp_stream_reset_out_request *)ph;
3547			num_req++;
3548			if (stcb->asoc.stream_reset_outstanding) {
3549				seq = ntohl(req_out->response_seq);
3550				if (seq == stcb->asoc.str_reset_seq_out) {
3551					/* implicit ack */
3552					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3553				}
3554			}
3555			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
3556		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3557			struct sctp_stream_reset_in_request *req_in;
3558
3559			num_req++;
3560
3561			req_in = (struct sctp_stream_reset_in_request *)ph;
3562
3563			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
3564		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3565			struct sctp_stream_reset_tsn_request *req_tsn;
3566
3567			num_req++;
3568			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3569
3570			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3571				ret_code = 1;
3572				goto strres_nochunk;
3573			}
3574			/* no more */
3575			break;
3576		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
3577			struct sctp_stream_reset_response *resp;
3578			uint32_t result;
3579
3580			resp = (struct sctp_stream_reset_response *)ph;
3581			seq = ntohl(resp->response_seq);
3582			result = ntohl(resp->result);
3583			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3584				ret_code = 1;
3585				goto strres_nochunk;
3586			}
3587		} else {
3588			break;
3589		}
3590		offset += SCTP_SIZE32(param_len);
3591		chk_length -= SCTP_SIZE32(param_len);
3592	}
3593	if (num_req == 0) {
3594		/* we have no response to send, free the chunk */
3595		goto strres_nochunk;
3596	}
3597	/* ok we have a chunk to link in */
3598	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3599	    chk,
3600	    sctp_next);
3601	stcb->asoc.ctrl_queue_cnt++;
3602	return (ret_code);
3603}
3604
3605/*
3606 * Handle a router's or endpoint's report of a packet loss.  There are two
3607 * ways to handle this: either we get the whole packet and must dissect it
3608 * ourselves (possibly with truncation and/or corruption), or it is a
3609 * summary from a middle box that did the dissecting for us.
3610 */
3611static void
3612sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3613    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3614{
3615	uint32_t bottle_bw, on_queue;
3616	uint16_t trunc_len;
3617	unsigned int chlen;
3618	unsigned int at;
3619	struct sctp_chunk_desc desc;
3620	struct sctp_chunkhdr *ch;
3621
3622	chlen = ntohs(cp->ch.chunk_length);
3623	chlen -= sizeof(struct sctp_pktdrop_chunk);
3624	/* XXX possible chlen underflow */
3625	if (chlen == 0) {
3626		ch = NULL;
3627		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3628			SCTP_STAT_INCR(sctps_pdrpbwrpt);
3629	} else {
3630		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3631		chlen -= sizeof(struct sctphdr);
3632		/* XXX possible chlen underflow */
3633		memset(&desc, 0, sizeof(desc));
3634	}
3635	trunc_len = (uint16_t) ntohs(cp->trunc_len);
3636	if (trunc_len > limit) {
3637		trunc_len = limit;
3638	}
3639	/* now the chunks themselves */
3640	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3641		desc.chunk_type = ch->chunk_type;
3642		/* get amount we need to move */
3643		at = ntohs(ch->chunk_length);
3644		if (at < sizeof(struct sctp_chunkhdr)) {
3645			/* corrupt chunk, maybe at the end? */
3646			SCTP_STAT_INCR(sctps_pdrpcrupt);
3647			break;
3648		}
3649		if (trunc_len == 0) {
3650			/* we are supposed to have all of it */
3651			if (at > chlen) {
3652				/* corrupt, skip it */
3653				SCTP_STAT_INCR(sctps_pdrpcrupt);
3654				break;
3655			}
3656		} else {
3657			/* is there enough of it left ? */
3658			if (desc.chunk_type == SCTP_DATA) {
3659				if (chlen < (sizeof(struct sctp_data_chunk) +
3660				    sizeof(desc.data_bytes))) {
3661					break;
3662				}
3663			} else {
3664				if (chlen < sizeof(struct sctp_chunkhdr)) {
3665					break;
3666				}
3667			}
3668		}
3669		if (desc.chunk_type == SCTP_DATA) {
3670			/* can we get out the tsn? */
3671			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3672				SCTP_STAT_INCR(sctps_pdrpmbda);
3673
3674			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3675				/* yep */
3676				struct sctp_data_chunk *dcp;
3677				uint8_t *ddp;
3678				unsigned int iii;
3679
3680				dcp = (struct sctp_data_chunk *)ch;
3681				ddp = (uint8_t *) (dcp + 1);
3682				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3683					desc.data_bytes[iii] = ddp[iii];
3684				}
3685				desc.tsn_ifany = dcp->dp.tsn;
3686			} else {
3687				/* nope we are done. */
3688				SCTP_STAT_INCR(sctps_pdrpnedat);
3689				break;
3690			}
3691		} else {
3692			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3693				SCTP_STAT_INCR(sctps_pdrpmbct);
3694		}
3695
3696		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3697			SCTP_STAT_INCR(sctps_pdrppdbrk);
3698			break;
3699		}
3700		if (SCTP_SIZE32(at) > chlen) {
3701			break;
3702		}
3703		chlen -= SCTP_SIZE32(at);
3704		if (chlen < sizeof(struct sctp_chunkhdr)) {
3705			/* done, none left */
3706			break;
3707		}
3708		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3709	}
3710	/* Now update any rwnd --- possibly */
3711	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3712		/* From a peer, we get a rwnd report */
3713		uint32_t a_rwnd;
3714
3715		SCTP_STAT_INCR(sctps_pdrpfehos);
3716
3717		bottle_bw = ntohl(cp->bottle_bw);
3718		on_queue = ntohl(cp->current_onq);
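		/*
		 * The peer reports its bottleneck bandwidth and how much data is
		 * already queued at that bottleneck; the difference acts as an
		 * implicit rwnd.  For example (illustrative numbers only), a
		 * bottle_bw of 64000 with 24000 bytes on queue leaves a 40000 byte
		 * rwnd, from which our bytes in flight are subtracted below.
		 */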
3719		if (bottle_bw && on_queue) {
3720			/* a rwnd report is in here */
3721			if (bottle_bw > on_queue)
3722				a_rwnd = bottle_bw - on_queue;
3723			else
3724				a_rwnd = 0;
3725
3726			if (a_rwnd == 0)
3727				stcb->asoc.peers_rwnd = 0;
3728			else {
3729				if (a_rwnd > stcb->asoc.total_flight) {
3730					stcb->asoc.peers_rwnd =
3731					    a_rwnd - stcb->asoc.total_flight;
3732				} else {
3733					stcb->asoc.peers_rwnd = 0;
3734				}
3735				if (stcb->asoc.peers_rwnd <
3736				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3737					/* SWS sender side engages */
3738					stcb->asoc.peers_rwnd = 0;
3739				}
3740			}
3741		}
3742	} else {
3743		SCTP_STAT_INCR(sctps_pdrpfmbox);
3744	}
3745
3746	/* now middle boxes in sat networks get a cwnd bump */
3747	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3748	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
3749	    (stcb->asoc.sat_network)) {
3750		/*
3751		 * This is debatable, but for satellite networks it makes sense.
3752		 * Note that if a T3 timer has gone off, we will prohibit any
3753		 * changes to cwnd until we exit the T3 loss recovery.
3754		 */
3755		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
3756		    net, cp, &bottle_bw, &on_queue);
3757	}
3758}
3759
3760/*
3761 * Handles all control chunks in a packet.
3762 * inputs:  m - mbuf chain, assumed to still contain the IP/SCTP header;
3763 *   stcb - the tcb found for this packet; offset - offset into the mbuf
3764 *   chain to the first chunkhdr; length - the length of the complete packet.
3765 * outputs: length - modified to the remaining length after control processing;
3766 *   netp - modified to the new sctp_nets after cookie-echo processing.
3767 * Returns NULL to discard the packet (ie. no asoc, bad packet, ...), otherwise the tcb for this packet.
3768 */
3769#ifdef __GNUC__
3770__attribute__((noinline))
3771#endif
3772	static struct sctp_tcb *
3773	         sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3774             struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3775             struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3776             uint32_t vrf_id)
3777{
3778	struct sctp_association *asoc;
3779	uint32_t vtag_in;
3780	int num_chunks = 0;	/* number of control chunks processed */
3781	uint32_t chk_length;
3782	int ret;
3783	int abort_no_unlock = 0;
3784
3785	/*
3786	 * How big should this be, and should it be alloc'd? Let's try the
3787	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3788	 * until we get into jumbo grams and such..
3789	 */
3790	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3791	struct sctp_tcb *locked_tcb = stcb;
3792	int got_auth = 0;
3793	uint32_t auth_offset = 0, auth_len = 0;
3794	int auth_skipped = 0;
3795	int asconf_cnt = 0;
3796
3797#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3798	struct socket *so;
3799
3800#endif
3801
3802	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3803	    iphlen, *offset, length, stcb);
3804
3805	/* validate chunk header length... */
3806	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3807		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3808		    ntohs(ch->chunk_length));
3809		if (locked_tcb) {
3810			SCTP_TCB_UNLOCK(locked_tcb);
3811		}
3812		return (NULL);
3813	}
3814	/*
3815	 * validate the verification tag
3816	 */
3817	vtag_in = ntohl(sh->v_tag);
3818
3819	if (locked_tcb) {
3820		SCTP_TCB_LOCK_ASSERT(locked_tcb);
3821	}
3822	if (ch->chunk_type == SCTP_INITIATION) {
3823		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
3824		    ntohs(ch->chunk_length), vtag_in);
3825		if (vtag_in != 0) {
3826			/* protocol error- silently discard... */
3827			SCTP_STAT_INCR(sctps_badvtag);
3828			if (locked_tcb) {
3829				SCTP_TCB_UNLOCK(locked_tcb);
3830			}
3831			return (NULL);
3832		}
3833	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3834		/*
3835		 * If there is no stcb, skip the AUTH chunk and process it
3836		 * later, after a stcb is found (to validate that the lookup
3837		 * was valid).
3838		 */
3839		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3840		    (stcb == NULL) && !sctp_auth_disable) {
3841			/* save this chunk for later processing */
3842			auth_skipped = 1;
3843			auth_offset = *offset;
3844			auth_len = ntohs(ch->chunk_length);
3845
3846			/* (temporarily) move past this chunk */
3847			*offset += SCTP_SIZE32(auth_len);
3848			if (*offset >= length) {
3849				/* no more data left in the mbuf chain */
3850				*offset = length;
3851				if (locked_tcb) {
3852					SCTP_TCB_UNLOCK(locked_tcb);
3853				}
3854				return (NULL);
3855			}
3856			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3857			    sizeof(struct sctp_chunkhdr), chunk_buf);
3858		}
3859		if (ch == NULL) {
3860			/* Help */
3861			*offset = length;
3862			if (locked_tcb) {
3863				SCTP_TCB_UNLOCK(locked_tcb);
3864			}
3865			return (NULL);
3866		}
3867		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3868			goto process_control_chunks;
3869		}
3870		/*
3871		 * first check if it's an ASCONF with an unknown src addr;
3872		 * we need to look inside it to find the association
3873		 */
3874		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3875			struct sctp_chunkhdr *asconf_ch = ch;
3876			uint32_t asconf_offset = 0, asconf_len = 0;
3877
3878			/* inp's refcount may be reduced */
3879			SCTP_INP_INCR_REF(inp);
3880
3881			asconf_offset = *offset;
3882			do {
3883				asconf_len = ntohs(asconf_ch->chunk_length);
3884				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
3885					break;
3886				stcb = sctp_findassociation_ep_asconf(m, iphlen,
3887				    *offset, sh, &inp, netp);
3888				if (stcb != NULL)
3889					break;
3890				asconf_offset += SCTP_SIZE32(asconf_len);
3891				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
3892				    sizeof(struct sctp_chunkhdr), chunk_buf);
3893			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
3894			if (stcb == NULL) {
3895				/*
3896				 * reduce inp's refcount if not reduced in
3897				 * sctp_findassociation_ep_asconf().
3898				 */
3899				SCTP_INP_DECR_REF(inp);
3900			} else {
3901				locked_tcb = stcb;
3902			}
3903
3904			/* now go back and verify any auth chunk to be sure */
3905			if (auth_skipped && (stcb != NULL)) {
3906				struct sctp_auth_chunk *auth;
3907
3908				auth = (struct sctp_auth_chunk *)
3909				    sctp_m_getptr(m, auth_offset,
3910				    auth_len, chunk_buf);
3911				got_auth = 1;
3912				auth_skipped = 0;
3913				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3914				    auth_offset)) {
3915					/* auth HMAC failed so dump it */
3916					*offset = length;
3917					if (locked_tcb) {
3918						SCTP_TCB_UNLOCK(locked_tcb);
3919					}
3920					return (NULL);
3921				} else {
3922					/* remaining chunks are HMAC checked */
3923					stcb->asoc.authenticated = 1;
3924				}
3925			}
3926		}
3927		if (stcb == NULL) {
3928			/* no association, so it's out of the blue... */
3929			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3930			    vrf_id);
3931			*offset = length;
3932			if (locked_tcb) {
3933				SCTP_TCB_UNLOCK(locked_tcb);
3934			}
3935			return (NULL);
3936		}
3937		asoc = &stcb->asoc;
3938		/* ABORT and SHUTDOWN can use either v_tag... */
3939		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3940		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3941		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
3942			if ((vtag_in == asoc->my_vtag) ||
3943			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
3944			    (vtag_in == asoc->peer_vtag))) {
3945				/* this is valid */
3946			} else {
3947				/* drop this packet... */
3948				SCTP_STAT_INCR(sctps_badvtag);
3949				if (locked_tcb) {
3950					SCTP_TCB_UNLOCK(locked_tcb);
3951				}
3952				return (NULL);
3953			}
3954		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
3955			if (vtag_in != asoc->my_vtag) {
3956				/*
3957				 * this could be a stale SHUTDOWN-ACK or the
3958				 * peer never got the SHUTDOWN-COMPLETE and
3959				 * is still hung; we have started a new asoc
3960				 * but it won't complete until the shutdown
3961				 * is completed
3962				 */
3963				if (locked_tcb) {
3964					SCTP_TCB_UNLOCK(locked_tcb);
3965				}
3966				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
3967				    NULL, vrf_id);
3968				return (NULL);
3969			}
3970		} else {
3971			/* for all other chunks, vtag must match */
3972			if (vtag_in != asoc->my_vtag) {
3973				/* invalid vtag... */
3974				SCTPDBG(SCTP_DEBUG_INPUT3,
3975				    "invalid vtag: %xh, expect %xh\n",
3976				    vtag_in, asoc->my_vtag);
3977				SCTP_STAT_INCR(sctps_badvtag);
3978				if (locked_tcb) {
3979					SCTP_TCB_UNLOCK(locked_tcb);
3980				}
3981				*offset = length;
3982				return (NULL);
3983			}
3984		}
3985	}			/* end if !SCTP_COOKIE_ECHO */
3986	/*
3987	 * process all control chunks...
3988	 */
3989	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
3990	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
3991	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
3992		/* implied cookie-ack.. we must have lost the ack */
3993		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
3994			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3995			    stcb->asoc.overall_error_count,
3996			    0,
3997			    SCTP_FROM_SCTP_INPUT,
3998			    __LINE__);
3999		}
4000		stcb->asoc.overall_error_count = 0;
4001		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4002		    *netp);
4003	}
4004process_control_chunks:
4005	while (IS_SCTP_CONTROL(ch)) {
4006		/* validate chunk length */
4007		chk_length = ntohs(ch->chunk_length);
4008		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4009		    ch->chunk_type, chk_length);
4010		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4011		if (chk_length < sizeof(*ch) ||
4012		    (*offset + (int)chk_length) > length) {
4013			*offset = length;
4014			if (locked_tcb) {
4015				SCTP_TCB_UNLOCK(locked_tcb);
4016			}
4017			return (NULL);
4018		}
4019		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4020		/*
4021		 * An INIT-ACK gets just the init-ack "header" portion,
4022		 * because we don't have to process the peer's COOKIE. All
4023		 * others get a complete chunk.
4024		 */
4025		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4026		    (ch->chunk_type == SCTP_INITIATION)) {
4027			/* get an init-ack chunk */
4028			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4029			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
4030			if (ch == NULL) {
4031				*offset = length;
4032				if (locked_tcb) {
4033					SCTP_TCB_UNLOCK(locked_tcb);
4034				}
4035				return (NULL);
4036			}
4037		} else {
4038			/* For cookies and all other chunks. */
4039			if (chk_length > sizeof(chunk_buf)) {
4040				/*
4041				 * use just the size of the chunk buffer so
4042				 * the front part of our chunks fit in
4043				 * contiguous space up to the chunk buffer
4044				 * size (508 bytes). Chunks that need to
4045				 * get more than that must use the
4046				 * sctp_m_getptr() function or other means
4047				 * (e.g. know how to parse mbuf chains).
4048				 * Cookies do this already.
4049				 */
4050				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4051				    (sizeof(chunk_buf) - 4),
4052				    chunk_buf);
4053				if (ch == NULL) {
4054					*offset = length;
4055					if (locked_tcb) {
4056						SCTP_TCB_UNLOCK(locked_tcb);
4057					}
4058					return (NULL);
4059				}
4060			} else {
4061				/* We can fit it all */
4062				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4063				    chk_length, chunk_buf);
4064				if (ch == NULL) {
4065					SCTP_PRINTF("sctp_process_control: Can't get all the data....\n");
4066					*offset = length;
4067					if (locked_tcb) {
4068						SCTP_TCB_UNLOCK(locked_tcb);
4069					}
4070					return (NULL);
4071				}
4072			}
4073		}
4074		num_chunks++;
4075		/* Save off the last place we got a control from */
4076		if (stcb != NULL) {
4077			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4078				/*
4079				 * allow last_control to be NULL if
4080				 * ASCONF... ASCONF processing will find the
4081				 * right net later
4082				 */
4083				if ((netp != NULL) && (*netp != NULL))
4084					stcb->asoc.last_control_chunk_from = *netp;
4085			}
4086		}
4087#ifdef SCTP_AUDITING_ENABLED
4088		sctp_audit_log(0xB0, ch->chunk_type);
4089#endif
4090
4091		/* check to see if this chunk required auth, but isn't */
4092		if ((stcb != NULL) && !sctp_auth_disable &&
4093		    sctp_auth_is_required_chunk(ch->chunk_type,
4094		    stcb->asoc.local_auth_chunks) &&
4095		    !stcb->asoc.authenticated) {
4096			/* "silently" ignore */
4097			SCTP_STAT_INCR(sctps_recvauthmissing);
4098			goto next_chunk;
4099		}
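		/*
		 * Dispatch on the chunk type.  Most cases either consume this
		 * chunk and continue at next_chunk, or consume the rest of the
		 * packet by setting *offset = length and returning.
		 */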
4100		switch (ch->chunk_type) {
4101		case SCTP_INITIATION:
4102			/* must be first and only chunk */
4103			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
4104			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4105				/* We are not interested anymore? */
4106				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4107					/*
4108					 * collision case where we are
4109					 * sending to them too
4110					 */
4111					;
4112				} else {
4113					if (locked_tcb) {
4114						SCTP_TCB_UNLOCK(locked_tcb);
4115					}
4116					*offset = length;
4117					return (NULL);
4118				}
4119			}
4120			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
4121			    (num_chunks > 1) ||
4122			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4123				*offset = length;
4124				if (locked_tcb) {
4125					SCTP_TCB_UNLOCK(locked_tcb);
4126				}
4127				return (NULL);
4128			}
4129			if ((stcb != NULL) &&
4130			    (SCTP_GET_STATE(&stcb->asoc) ==
4131			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4132				sctp_send_shutdown_ack(stcb,
4133				    stcb->asoc.primary_destination);
4134				*offset = length;
4135				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4136				if (locked_tcb) {
4137					SCTP_TCB_UNLOCK(locked_tcb);
4138				}
4139				return (NULL);
4140			}
4141			if (netp) {
4142				sctp_handle_init(m, iphlen, *offset, sh,
4143				    (struct sctp_init_chunk *)ch, inp,
4144				    stcb, *netp, &abort_no_unlock, vrf_id);
4145			}
4146			if (abort_no_unlock)
4147				return (NULL);
4148
4149			*offset = length;
4150			if (locked_tcb) {
4151				SCTP_TCB_UNLOCK(locked_tcb);
4152			}
4153			return (NULL);
4154			break;
4155		case SCTP_PAD_CHUNK:
4156			break;
4157		case SCTP_INITIATION_ACK:
4158			/* must be first and only chunk */
4159			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
4160			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4161				/* We are not interested anymore */
4162				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4163					;
4164				} else {
4165					if (locked_tcb) {
4166						SCTP_TCB_UNLOCK(locked_tcb);
4167					}
4168					*offset = length;
4169					if (stcb) {
4170#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4171						so = SCTP_INP_SO(inp);
4172						atomic_add_int(&stcb->asoc.refcnt, 1);
4173						SCTP_TCB_UNLOCK(stcb);
4174						SCTP_SOCKET_LOCK(so, 1);
4175						SCTP_TCB_LOCK(stcb);
4176						atomic_subtract_int(&stcb->asoc.refcnt, 1);
4177#endif
4178						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4179#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4180						SCTP_SOCKET_UNLOCK(so, 1);
4181#endif
4182					}
4183					return (NULL);
4184				}
4185			}
4186			if ((num_chunks > 1) ||
4187			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
4188				*offset = length;
4189				if (locked_tcb) {
4190					SCTP_TCB_UNLOCK(locked_tcb);
4191				}
4192				return (NULL);
4193			}
4194			if ((netp) && (*netp)) {
4195				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
4196				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
4197			} else {
4198				ret = -1;
4199			}
4200			/*
4201			 * Special case, I must call the output routine to
4202			 * get the cookie echoed
4203			 */
4204			if (abort_no_unlock)
4205				return (NULL);
4206
4207			if ((stcb) && ret == 0)
4208				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
4209			*offset = length;
4210			if (locked_tcb) {
4211				SCTP_TCB_UNLOCK(locked_tcb);
4212			}
4213			return (NULL);
4214			break;
4215		case SCTP_SELECTIVE_ACK:
4216			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
4217			SCTP_STAT_INCR(sctps_recvsacks);
4218			{
4219				struct sctp_sack_chunk *sack;
4220				int abort_now = 0;
4221				uint32_t a_rwnd, cum_ack;
4222				uint16_t num_seg;
4223				int nonce_sum_flag;
4224
4225				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
4226					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
4227			ignore_sack:
4228					*offset = length;
4229					if (locked_tcb) {
4230						SCTP_TCB_UNLOCK(locked_tcb);
4231					}
4232					return (NULL);
4233				}
4234				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
4235					/*-
4236					 * If we have sent a shutdown-ack, we will pay no
4237					 * attention to a sack sent in to us since
4238					 * we don't care anymore.
4239					 */
4240					goto ignore_sack;
4241				}
4242				sack = (struct sctp_sack_chunk *)ch;
4243				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
4244				cum_ack = ntohl(sack->sack.cum_tsn_ack);
4245				num_seg = ntohs(sack->sack.num_gap_ack_blks);
4246				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
4247				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
4248				    cum_ack,
4249				    num_seg,
4250				    a_rwnd
4251				    );
4252				stcb->asoc.seen_a_sack_this_pkt = 1;
4253				if ((stcb->asoc.pr_sctp_cnt == 0) &&
4254				    (num_seg == 0) &&
4255				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
4256				    (cum_ack == stcb->asoc.last_acked_seq)) &&
4257				    (stcb->asoc.saw_sack_with_frags == 0) &&
4258				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
4259				    ) {
4260					/*
4261					 * We have a SIMPLE sack with no
4262					 * gap-ack segments and data on the
4263					 * sent queue to be acked.  Use the
4264					 * faster-path sack processing.  We
4265					 * also allow window-update sacks
4266					 * with no missing segments to go
4267					 * this way too.
4268					 */
4269					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
4270					    &abort_now);
4271				} else {
4272					if (netp && *netp)
4273						sctp_handle_sack(m, *offset,
4274						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
4275				}
4276				if (abort_now) {
4277					/* ABORT signal from sack processing */
4278					*offset = length;
4279					return (NULL);
4280				}
4281			}
4282			break;
4283		case SCTP_HEARTBEAT_REQUEST:
4284			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
4285			if ((stcb) && netp && *netp) {
4286				SCTP_STAT_INCR(sctps_recvheartbeat);
4287				sctp_send_heartbeat_ack(stcb, m, *offset,
4288				    chk_length, *netp);
4289
4290				/* He's alive so give him credit */
4291				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4292					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4293					    stcb->asoc.overall_error_count,
4294					    0,
4295					    SCTP_FROM_SCTP_INPUT,
4296					    __LINE__);
4297				}
4298				stcb->asoc.overall_error_count = 0;
4299			}
4300			break;
4301		case SCTP_HEARTBEAT_ACK:
4302			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
4303			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
4304				/* It's not ours */
4305				*offset = length;
4306				if (locked_tcb) {
4307					SCTP_TCB_UNLOCK(locked_tcb);
4308				}
4309				return (NULL);
4310			}
4311			/* He's alive so give him credit */
4312			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4313				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4314				    stcb->asoc.overall_error_count,
4315				    0,
4316				    SCTP_FROM_SCTP_INPUT,
4317				    __LINE__);
4318			}
4319			stcb->asoc.overall_error_count = 0;
4320			SCTP_STAT_INCR(sctps_recvheartbeatack);
4321			if (netp && *netp)
4322				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
4323				    stcb, *netp);
4324			break;
4325		case SCTP_ABORT_ASSOCIATION:
4326			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
4327			    stcb);
4328			if ((stcb) && netp && *netp)
4329				sctp_handle_abort((struct sctp_abort_chunk *)ch,
4330				    stcb, *netp);
4331			*offset = length;
4332			return (NULL);
4333			break;
4334		case SCTP_SHUTDOWN:
4335			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
4336			    stcb);
4337			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
4338				*offset = length;
4339				if (locked_tcb) {
4340					SCTP_TCB_UNLOCK(locked_tcb);
4341				}
4342				return (NULL);
4343			}
4344			if (netp && *netp) {
4345				int abort_flag = 0;
4346
4347				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
4348				    stcb, *netp, &abort_flag);
4349				if (abort_flag) {
4350					*offset = length;
4351					return (NULL);
4352				}
4353			}
4354			break;
4355		case SCTP_SHUTDOWN_ACK:
4356			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
4357			if ((stcb) && (netp) && (*netp))
4358				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
4359			*offset = length;
4360			return (NULL);
4361			break;
4362
4363		case SCTP_OPERATION_ERROR:
4364			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
4365			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
4366
4367				*offset = length;
4368				return (NULL);
4369			}
4370			break;
4371		case SCTP_COOKIE_ECHO:
4372			SCTPDBG(SCTP_DEBUG_INPUT3,
4373			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
4374			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4375				;
4376			} else {
4377				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4378					/* We are not interested anymore */
4379					*offset = length;
4380					return (NULL);
4381				}
4382			}
4383			/*
4384			 * First, are we accepting? We do this again here
4385			 * since it is possible that a previous endpoint
4386			 * WAS listening, responded to an INIT-ACK, and then
4387			 * closed. We opened and bound.. and are now no
4388			 * longer listening.
4389			 */
4390			if (inp->sctp_socket->so_qlimit == 0) {
4391				if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4392					/*
4393					 * special case: is this a retran'd
4394					 * COOKIE-ECHO or a restarting assoc
4395					 * on a peeled-off or
4396					 * one-to-one style socket?
4397					 */
4398					goto process_cookie_anyway;
4399				}
4400				sctp_abort_association(inp, stcb, m, iphlen,
4401				    sh, NULL, vrf_id);
4402				*offset = length;
4403				return (NULL);
4404			} else if (inp->sctp_socket->so_qlimit) {
4405				/* we are accepting so check limits like TCP */
4406				if (inp->sctp_socket->so_qlen >=
4407				    inp->sctp_socket->so_qlimit) {
4408					/* no space */
4409					struct mbuf *oper;
4410					struct sctp_paramhdr *phdr;
4411
4412					if (sctp_abort_if_one_2_one_hits_limit) {
4413						oper = NULL;
4414						oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4415						    0, M_DONTWAIT, 1, MT_DATA);
4416						if (oper) {
4417							SCTP_BUF_LEN(oper) =
4418							    sizeof(struct sctp_paramhdr);
4419							phdr = mtod(oper,
4420							    struct sctp_paramhdr *);
4421							phdr->param_type =
4422							    htons(SCTP_CAUSE_OUT_OF_RESC);
4423							phdr->param_length =
4424							    htons(sizeof(struct sctp_paramhdr));
4425						}
4426						sctp_abort_association(inp, stcb, m,
4427						    iphlen, sh, oper, vrf_id);
4428					}
4429					*offset = length;
4430					return (NULL);
4431				}
4432			}
4433	process_cookie_anyway:
4434			{
4435				struct mbuf *ret_buf;
4436				struct sctp_inpcb *linp;
4437
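				/*
				 * Take the asoc-create lock only when no stcb exists
				 * yet, since sctp_handle_cookie_echo() may build a
				 * brand-new association from the cookie.
				 */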
4438				if (stcb) {
4439					linp = NULL;
4440				} else {
4441					linp = inp;
4442				}
4443
4444				if (linp) {
4445					SCTP_ASOC_CREATE_LOCK(linp);
4446				}
4447				if (netp) {
4448					ret_buf =
4449					    sctp_handle_cookie_echo(m, iphlen,
4450					    *offset, sh,
4451					    (struct sctp_cookie_echo_chunk *)ch,
4452					    &inp, &stcb, netp,
4453					    auth_skipped,
4454					    auth_offset,
4455					    auth_len,
4456					    &locked_tcb,
4457					    vrf_id);
4458				} else {
4459					ret_buf = NULL;
4460				}
4461				if (linp) {
4462					SCTP_ASOC_CREATE_UNLOCK(linp);
4463				}
4464				if (ret_buf == NULL) {
4465					if (locked_tcb) {
4466						SCTP_TCB_UNLOCK(locked_tcb);
4467					}
4468					SCTPDBG(SCTP_DEBUG_INPUT3,
4469					    "GAK, null buffer\n");
4470					auth_skipped = 0;
4471					*offset = length;
4472					return (NULL);
4473				}
4474				/* if AUTH skipped, see if it verified... */
4475				if (auth_skipped) {
4476					got_auth = 1;
4477					auth_skipped = 0;
4478				}
4479				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4480					/*
4481					 * Restart the timer if we have
4482					 * pending data
4483					 */
4484					struct sctp_tmit_chunk *chk;
4485
4486					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4487					if (chk) {
4488						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4489						    stcb->sctp_ep, stcb,
4490						    chk->whoTo);
4491					}
4492				}
4493			}
4494			break;
4495		case SCTP_COOKIE_ACK:
4496			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4497			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4498				if (locked_tcb) {
4499					SCTP_TCB_UNLOCK(locked_tcb);
4500				}
4501				return (NULL);
4502			}
4503			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4504				/* We are not interested anymore */
4505				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4506					;
4507				} else if (stcb) {
4508#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4509					so = SCTP_INP_SO(inp);
4510					atomic_add_int(&stcb->asoc.refcnt, 1);
4511					SCTP_TCB_UNLOCK(stcb);
4512					SCTP_SOCKET_LOCK(so, 1);
4513					SCTP_TCB_LOCK(stcb);
4514					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4515#endif
4516					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4517#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4518					SCTP_SOCKET_UNLOCK(so, 1);
4519#endif
4520					*offset = length;
4521					return (NULL);
4522				}
4523			}
4524			/* He's alive so give him credit */
4525			if ((stcb) && netp && *netp) {
4526				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4527					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4528					    stcb->asoc.overall_error_count,
4529					    0,
4530					    SCTP_FROM_SCTP_INPUT,
4531					    __LINE__);
4532				}
4533				stcb->asoc.overall_error_count = 0;
4534				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4535			}
4536			break;
4537		case SCTP_ECN_ECHO:
4538			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4539			/* He's alive so give him credit */
4540			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4541				/* It's not ours */
4542				if (locked_tcb) {
4543					SCTP_TCB_UNLOCK(locked_tcb);
4544				}
4545				*offset = length;
4546				return (NULL);
4547			}
4548			if (stcb) {
4549				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4550					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4551					    stcb->asoc.overall_error_count,
4552					    0,
4553					    SCTP_FROM_SCTP_INPUT,
4554					    __LINE__);
4555				}
4556				stcb->asoc.overall_error_count = 0;
4557				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4558				    stcb);
4559			}
4560			break;
4561		case SCTP_ECN_CWR:
4562			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4563			/* He's alive so give him credit */
4564			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4565				/* It's not ours */
4566				if (locked_tcb) {
4567					SCTP_TCB_UNLOCK(locked_tcb);
4568				}
4569				*offset = length;
4570				return (NULL);
4571			}
4572			if (stcb) {
4573				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4574					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4575					    stcb->asoc.overall_error_count,
4576					    0,
4577					    SCTP_FROM_SCTP_INPUT,
4578					    __LINE__);
4579				}
4580				stcb->asoc.overall_error_count = 0;
4581				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4582			}
4583			break;
4584		case SCTP_SHUTDOWN_COMPLETE:
4585			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4586			/* must be first and only chunk */
4587			if ((num_chunks > 1) ||
4588			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4589				*offset = length;
4590				if (locked_tcb) {
4591					SCTP_TCB_UNLOCK(locked_tcb);
4592				}
4593				return (NULL);
4594			}
4595			if ((stcb) && netp && *netp) {
4596				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4597				    stcb, *netp);
4598			}
4599			*offset = length;
4600			return (NULL);
4601			break;
4602		case SCTP_ASCONF:
4603			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4604			/* He's alive so give him credit */
4605			if (stcb) {
4606				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4607					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4608					    stcb->asoc.overall_error_count,
4609					    0,
4610					    SCTP_FROM_SCTP_INPUT,
4611					    __LINE__);
4612				}
4613				stcb->asoc.overall_error_count = 0;
4614				sctp_handle_asconf(m, *offset,
4615				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
4616				asconf_cnt++;
4617			}
4618			break;
4619		case SCTP_ASCONF_ACK:
4620			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4621			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4622				/* It's not ours */
4623				if (locked_tcb) {
4624					SCTP_TCB_UNLOCK(locked_tcb);
4625				}
4626				*offset = length;
4627				return (NULL);
4628			}
4629			if ((stcb) && netp && *netp) {
4630				/* He's alive so give him credit */
4631				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4632					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4633					    stcb->asoc.overall_error_count,
4634					    0,
4635					    SCTP_FROM_SCTP_INPUT,
4636					    __LINE__);
4637				}
4638				stcb->asoc.overall_error_count = 0;
4639				sctp_handle_asconf_ack(m, *offset,
4640				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
4641				if (abort_no_unlock)
4642					return (NULL);
4643			}
4644			break;
4645		case SCTP_FORWARD_CUM_TSN:
4646			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4647			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4648				/* It's not ours */
4649				if (locked_tcb) {
4650					SCTP_TCB_UNLOCK(locked_tcb);
4651				}
4652				*offset = length;
4653				return (NULL);
4654			}
4655			/* He's alive so give him credit */
4656			if (stcb) {
4657				int abort_flag = 0;
4658
4659				stcb->asoc.overall_error_count = 0;
4660				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4661					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4662					    stcb->asoc.overall_error_count,
4663					    0,
4664					    SCTP_FROM_SCTP_INPUT,
4665					    __LINE__);
4666				}
4667				*fwd_tsn_seen = 1;
4668				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4669					/* We are not interested anymore */
4670#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4671					so = SCTP_INP_SO(inp);
4672					atomic_add_int(&stcb->asoc.refcnt, 1);
4673					SCTP_TCB_UNLOCK(stcb);
4674					SCTP_SOCKET_LOCK(so, 1);
4675					SCTP_TCB_LOCK(stcb);
4676					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4677#endif
4678					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4679#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4680					SCTP_SOCKET_UNLOCK(so, 1);
4681#endif
4682					*offset = length;
4683					return (NULL);
4684				}
4685				sctp_handle_forward_tsn(stcb,
4686				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
4687				if (abort_flag) {
4688					*offset = length;
4689					return (NULL);
4690				} else {
4691					if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4692						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4693						    stcb->asoc.overall_error_count,
4694						    0,
4695						    SCTP_FROM_SCTP_INPUT,
4696						    __LINE__);
4697					}
4698					stcb->asoc.overall_error_count = 0;
4699				}
4700
4701			}
4702			break;
4703		case SCTP_STREAM_RESET:
4704			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4705			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4706				/* It's not ours */
4707				if (locked_tcb) {
4708					SCTP_TCB_UNLOCK(locked_tcb);
4709				}
4710				*offset = length;
4711				return (NULL);
4712			}
4713			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4714				/* We are not interested anymore */
4715#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4716				so = SCTP_INP_SO(inp);
4717				atomic_add_int(&stcb->asoc.refcnt, 1);
4718				SCTP_TCB_UNLOCK(stcb);
4719				SCTP_SOCKET_LOCK(so, 1);
4720				SCTP_TCB_LOCK(stcb);
4721				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4722#endif
4723				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
4724#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4725				SCTP_SOCKET_UNLOCK(so, 1);
4726#endif
4727				*offset = length;
4728				return (NULL);
4729			}
4730			if (stcb->asoc.peer_supports_strreset == 0) {
4731				/*
4732				 * hmm, peer should have announced this, but
4733				 * we will turn it on since he is sending us
4734				 * a stream reset.
4735				 */
4736				stcb->asoc.peer_supports_strreset = 1;
4737			}
4738			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
4739				/* stop processing */
4740				*offset = length;
4741				return (NULL);
4742			}
4743			break;
4744		case SCTP_PACKET_DROPPED:
4745			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4746			/* re-get it all please */
4747			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4748				/* It's not ours */
4749				if (locked_tcb) {
4750					SCTP_TCB_UNLOCK(locked_tcb);
4751				}
4752				*offset = length;
4753				return (NULL);
4754			}
4755			if (ch && (stcb) && netp && (*netp)) {
4756				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4757				    stcb, *netp,
4758				    min(chk_length, (sizeof(chunk_buf) - 4)));
4759
4760			}
4761			break;
4762
4763		case SCTP_AUTHENTICATION:
4764			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4765			if (sctp_auth_disable)
4766				goto unknown_chunk;
4767
4768			if (stcb == NULL) {
4769				/* save the first AUTH for later processing */
4770				if (auth_skipped == 0) {
4771					auth_offset = *offset;
4772					auth_len = chk_length;
4773					auth_skipped = 1;
4774				}
4775				/* skip this chunk (temporarily) */
4776				goto next_chunk;
4777			}
4778			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4779			    (chk_length > (sizeof(struct sctp_auth_chunk) +
4780			    SCTP_AUTH_DIGEST_LEN_MAX))) {
4781				/* It's not ours */
4782				if (locked_tcb) {
4783					SCTP_TCB_UNLOCK(locked_tcb);
4784				}
4785				*offset = length;
4786				return (NULL);
4787			}
4788			if (got_auth == 1) {
4789				/* skip this chunk... it's already auth'd */
4790				goto next_chunk;
4791			}
4792			got_auth = 1;
4793			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4794			    m, *offset)) {
4795				/* auth HMAC failed so dump the packet */
4796				*offset = length;
4797				return (stcb);
4798			} else {
4799				/* remaining chunks are HMAC checked */
4800				stcb->asoc.authenticated = 1;
4801			}
4802			break;
4803
4804		default:
4805	unknown_chunk:
4806			/* it's an unknown chunk! */
4807			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4808				struct mbuf *mm;
4809				struct sctp_paramhdr *phd;
4810
4811				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4812				    0, M_DONTWAIT, 1, MT_DATA);
4813				if (mm) {
4814					phd = mtod(mm, struct sctp_paramhdr *);
4815					/*
4816					 * We cheat and use param type since
4817					 * we did not bother to define an
4818					 * error cause struct. They are the
4819					 * same basic format with different
4820					 * names.
4821					 */
4822					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4823					phd->param_length = htons(chk_length + sizeof(*phd));
4824					SCTP_BUF_LEN(mm) = sizeof(*phd);
4825					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4826					    M_DONTWAIT);
4827					if (SCTP_BUF_NEXT(mm)) {
4828						sctp_queue_op_err(stcb, mm);
4829					} else {
4830						sctp_m_freem(mm);
4831					}
4832				}
4833			}
4834			if ((ch->chunk_type & 0x80) == 0) {
4835				/* discard this packet */
4836				*offset = length;
4837				return (stcb);
4838			}	/* else skip this bad chunk and continue... */
4839			break;
4840		}		/* switch (ch->chunk_type) */
4841
4842
4843next_chunk:
4844		/* get the next chunk */
4845		*offset += SCTP_SIZE32(chk_length);
4846		if (*offset >= length) {
4847			/* no more data left in the mbuf chain */
4848			break;
4849		}
4850		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4851		    sizeof(struct sctp_chunkhdr), chunk_buf);
4852		if (ch == NULL) {
4853			if (locked_tcb) {
4854				SCTP_TCB_UNLOCK(locked_tcb);
4855			}
4856			*offset = length;
4857			return (NULL);
4858		}
4859	}			/* while */
4860
4861	if (asconf_cnt > 0 && stcb != NULL) {
4862		sctp_send_asconf_ack(stcb);
4863	}
4864	return (stcb);
4865}
4866
4867
4868/*
4869 * Process the ECN bits: we have something set, so we must look to see
4870 * whether it is ECN(0), ECN(1), or CE.
4871 */
4872static void
4873sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4874    uint8_t ecn_bits)
4875{
4876	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4877		;
4878	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4879		/*
4880		 * We only add to the nonce sum for ECT(1); ECT(0) does not
4881		 * change the NS bit (which we have yet to find a way to
4882		 * send).
4883		 */
4884
4885		/* ECN Nonce stuff */
4886		stcb->asoc.receiver_nonce_sum++;
4887		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
4888
4889		/*
4890		 * Drag up the last_echo point if cumack is larger since we
4891		 * don't want the point falling way behind by more than
4892		 * 2^31 and then having it be incorrect.
4893		 */
4894		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4895		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4896			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4897		}
4898	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4899		/*
4900		 * Drag up the last_echo point if cumack is larger since we
4901		 * don't want the point falling way behind by more than
4902		 * 2^31 and then having it be incorrect.
4903		 */
4904		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4905		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
4906			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4907		}
4908	}
4909}
4910
4911static void
4912sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4913    uint32_t high_tsn, uint8_t ecn_bits)
4914{
4915	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4916		/*
4917		 * We may need to notify the sender that a congestion window
4918		 * reduction is in order. We do this by adding an ECNE chunk
4919		 * to the output chunk queue; the incoming CWR will remove
4920		 * this chunk.
4921		 */
4922		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4923		    MAX_TSN)) {
4924			/* Yep, we need to add an ECNE */
4925			sctp_send_ecn_echo(stcb, net, high_tsn);
4926			stcb->asoc.last_echo_tsn = high_tsn;
4927		}
4928	}
4929}
4930
4931#ifdef INVARIANTS
4932static void
4933sctp_validate_no_locks(struct sctp_inpcb *inp)
4934{
4935	struct sctp_tcb *stcb;
4936
4937	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
4938		if (mtx_owned(&stcb->tcb_mtx)) {
4939			panic("Own lock on stcb at return from input");
4940		}
4941	}
4942}
4943
4944#endif
4945
4946/*
4947 * common input chunk processing (v4 and v6)
4948 */
4949void
4950sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
4951    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
4952    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
4953    uint8_t ecn_bits, uint32_t vrf_id)
4954{
4955	/*
4956	 * Control chunk processing
4957	 */
4958	uint32_t high_tsn;
4959	int fwd_tsn_seen = 0, data_processed = 0;
4960	struct mbuf *m = *mm;
4961	int abort_flag = 0;
4962	int un_sent;
4963
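	/*
	 * Overview: process any control chunks first (which may change the
	 * stcb/inp we are working with), then the DATA chunks, then see
	 * whether a FWD-TSN without data means a SACK is owed, and finally
	 * kick the output path if anything is queued to send.
	 */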
4964	SCTP_STAT_INCR(sctps_recvdatagrams);
4965#ifdef SCTP_AUDITING_ENABLED
4966	sctp_audit_log(0xE0, 1);
4967	sctp_auditing(0, inp, stcb, net);
4968#endif
4969
4970	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n",
4971	    m, iphlen, offset, stcb);
4972	if (stcb) {
4973		/* always clear this before beginning a packet */
4974		stcb->asoc.authenticated = 0;
4975		stcb->asoc.seen_a_sack_this_pkt = 0;
4976		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
4977		    stcb, stcb->asoc.state);
4978
4979		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
4980		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
4981			/*-
4982			 * If we hit here, we had a ref count
4983			 * up when the assoc was aborted and the
4984			 * timer is clearing out the assoc; we should
4985			 * NOT respond to any packet... it's OOTB.
4986			 */
4987			SCTP_TCB_UNLOCK(stcb);
4988			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4989			    vrf_id);
4990			goto out_now;
4991		}
4992	}
4993	if (IS_SCTP_CONTROL(ch)) {
4994		/* process the control portion of the SCTP packet */
4995		/* sa_ignore NO_NULL_CHK */
4996		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
4997		    inp, stcb, &net, &fwd_tsn_seen, vrf_id);
4998		if (stcb) {
4999			/*
5000			 * This covers us if the cookie-echo was there and
5001			 * it changes our INP.
5002			 */
5003			inp = stcb->sctp_ep;
5004		}
5005	} else {
5006		/*
5007		 * no control chunks, so pre-process DATA chunks (these
5008		 * no control chunks, so pre-process DATA chunks (when control
5009		 * chunks are present these checks are done by control processing)
5010
5011		/*
5012		 * if DATA only packet, and auth is required, then punt...
5013		 * can't have authenticated without any AUTH (control)
5014		 * chunks
5015		 */
5016		if ((stcb != NULL) && !sctp_auth_disable &&
5017		    sctp_auth_is_required_chunk(SCTP_DATA,
5018		    stcb->asoc.local_auth_chunks)) {
5019			/* "silently" ignore */
5020			SCTP_STAT_INCR(sctps_recvauthmissing);
5021			SCTP_TCB_UNLOCK(stcb);
5022			goto out_now;
5023		}
5024		if (stcb == NULL) {
5025			/* out of the blue DATA chunk */
5026			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5027			    vrf_id);
5028			goto out_now;
5029		}
5030		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
5031			/* v_tag mismatch! */
5032			SCTP_STAT_INCR(sctps_badvtag);
5033			SCTP_TCB_UNLOCK(stcb);
5034			goto out_now;
5035		}
5036	}
5037
5038	if (stcb == NULL) {
5039		/*
5040		 * no valid TCB for this packet, or we found it to be a bad
5041		 * packet while processing control, or we're done with this
5042		 * packet (done or skip rest of data), so we drop it...
5043		 */
5044		goto out_now;
5045	}
5046	/*
5047	 * DATA chunk processing
5048	 */
5049	/* plow through the data chunks while length > offset */
5050
5051	/*
5052	 * Rest should be DATA only.  Check authentication state if AUTH for
5053	 * DATA is required.
5054	 */
5055	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
5056	    sctp_auth_is_required_chunk(SCTP_DATA,
5057	    stcb->asoc.local_auth_chunks) &&
5058	    !stcb->asoc.authenticated) {
5059		/* "silently" ignore */
5060		SCTP_STAT_INCR(sctps_recvauthmissing);
5061		SCTPDBG(SCTP_DEBUG_AUTH1,
5062		    "Data chunk requires AUTH, skipped\n");
5063		goto trigger_send;
5064	}
5065	if (length > offset) {
5066		int retval;
5067
5068		/*
5069		 * First check to make sure our state is correct. We would
5070		 * not get here unless we really did have a tag, so we don't
5071		 * abort if this happens, just dump the chunk silently.
5072		 */
5073		switch (SCTP_GET_STATE(&stcb->asoc)) {
5074		case SCTP_STATE_COOKIE_ECHOED:
5075			/*
5076			 * We consider data with a valid tag in this state to
5077			 * show us that the cookie-ack was lost; imply it was
5078			 * there.
5079			 */
5080			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
5081				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5082				    stcb->asoc.overall_error_count,
5083				    0,
5084				    SCTP_FROM_SCTP_INPUT,
5085				    __LINE__);
5086			}
5087			stcb->asoc.overall_error_count = 0;
5088			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
5089			break;
5090		case SCTP_STATE_COOKIE_WAIT:
5091			/*
5092			 * We consider OOTB any data sent during asoc setup.
5093			 */
5094			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
5095			    vrf_id);
5096			SCTP_TCB_UNLOCK(stcb);
5097			goto out_now;
5098			/* sa_ignore NOTREACHED */
5099			break;
5100		case SCTP_STATE_EMPTY:	/* should not happen */
5101		case SCTP_STATE_INUSE:	/* should not happen */
5102		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
5103		case SCTP_STATE_SHUTDOWN_ACK_SENT:
5104		default:
5105			SCTP_TCB_UNLOCK(stcb);
5106			goto out_now;
5107			/* sa_ignore NOTREACHED */
5108			break;
5109		case SCTP_STATE_OPEN:
5110		case SCTP_STATE_SHUTDOWN_SENT:
5111			break;
5112		}
5113		/* take care of ECN, part 1. */
5114		if (stcb->asoc.ecn_allowed &&
5115		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5116			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
5117		}
5118		/* plow through the data chunks while length > offset */
5119		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
5120		    inp, stcb, net, &high_tsn);
5121		if (retval == 2) {
5122			/*
5123			 * The association aborted, NO UNLOCK needed since
5124			 * the association is destroyed.
5125			 */
5126			goto out_now;
5127		}
5128		data_processed = 1;
5129		if (retval == 0) {
5130			/* take care of ecn part 2. */
5131			if (stcb->asoc.ecn_allowed &&
5132			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
5133				sctp_process_ecn_marked_b(stcb, net, high_tsn,
5134				    ecn_bits);
5135			}
5136		}
5137		/*
5138		 * Anything important needs to have been m_copy'ed in
5139		 * process_data
5140		 */
5141	}
5142	if ((data_processed == 0) && (fwd_tsn_seen)) {
5143		int was_a_gap = 0;
5144
5145		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
5146		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
5147			/* there was a gap before this data was processed */
5148			was_a_gap = 1;
5149		}
5150		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
5151		if (abort_flag) {
5152			/* Again, we aborted so NO UNLOCK needed */
5153			goto out_now;
5154		}
5155	}
5156	/* trigger send of any chunks in queue... */
5157trigger_send:
5158#ifdef SCTP_AUDITING_ENABLED
5159	sctp_audit_log(0xE0, 2);
5160	sctp_auditing(1, inp, stcb, net);
5161#endif
5162	SCTPDBG(SCTP_DEBUG_INPUT1,
5163	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
5164	    stcb->asoc.peers_rwnd,
5165	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
5166	    stcb->asoc.total_flight);
5167	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
5168
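	/*
	 * Kick the output path if control chunks are queued, or if there is
	 * unsent data and either the peer still advertises a window or
	 * nothing is in flight (so a window probe can go out).
	 */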
5169	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
5170	    ((un_sent) &&
5171	    (stcb->asoc.peers_rwnd > 0 ||
5172	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
5173		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
5174		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5175		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
5176	}
5177#ifdef SCTP_AUDITING_ENABLED
5178	sctp_audit_log(0xE0, 3);
5179	sctp_auditing(2, inp, stcb, net);
5180#endif
5181	SCTP_TCB_UNLOCK(stcb);
5182out_now:
5183#ifdef INVARIANTS
5184	sctp_validate_no_locks(inp);
5185#endif
5186	return;
5187}
5188
5189
5190
5191void
5192sctp_input(struct mbuf *i_pak, int off)
5193{
5197#ifdef SCTP_MBUF_LOGGING
5198	struct mbuf *mat;
5199
5200#endif
5201	struct mbuf *m;
5202	int iphlen;
5203	uint32_t vrf_id = 0;
5204	uint8_t ecn_bits;
5205	struct ip *ip;
5206	struct sctphdr *sh;
5207	struct sctp_inpcb *inp = NULL;
5208
5209	uint32_t check, calc_check;
5210	struct sctp_nets *net;
5211	struct sctp_tcb *stcb = NULL;
5212	struct sctp_chunkhdr *ch;
5213	int refcount_up = 0;
5214	int length, mlen, offset;
5215
5216
5217	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
5218		SCTP_RELEASE_PKT(i_pak);
5219		return;
5220	}
5221	mlen = SCTP_HEADER_LEN(i_pak);
5222	iphlen = off;
5223	m = SCTP_HEADER_TO_CHAIN(i_pak);
5224
5225	net = NULL;
5226	SCTP_STAT_INCR(sctps_recvpackets);
5227	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
5228
5229
5230#ifdef SCTP_MBUF_LOGGING
5231	/* Log in any input mbufs */
5232	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
5233		mat = m;
5234		while (mat) {
5235			if (SCTP_BUF_IS_EXTENDED(mat)) {
5236				sctp_log_mb(mat, SCTP_MBUF_INPUT);
5237			}
5238			mat = SCTP_BUF_NEXT(mat);
5239		}
5240	}
5241#endif
5242#ifdef  SCTP_PACKET_LOGGING
5243	if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
5244		sctp_packet_log(m, mlen);
5245#endif
5246	/*
5247	 * Must take out the iphlen, since mlen expects this (only affects the
5248	 * loopback case).
5249	 */
5250	mlen -= iphlen;
5251
5252	/*
5253	 * Get IP, SCTP, and first chunk header together in first mbuf.
5254	 */
5255	ip = mtod(m, struct ip *);
5256	offset = iphlen + sizeof(*sh) + sizeof(*ch);
5257	if (SCTP_BUF_LEN(m) < offset) {
5258		if ((m = m_pullup(m, offset)) == NULL) {
5259			SCTP_STAT_INCR(sctps_hdrops);
5260			return;
5261		}
5262		ip = mtod(m, struct ip *);
5263	}
5264	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
5265	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
5266	SCTPDBG(SCTP_DEBUG_INPUT1,
5267	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);
5268
5269	/* SCTP does not allow broadcasts or multicasts */
5270	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
5271		goto bad;
5272	}
5273	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
5274		/*
5275		 * We only look at broadcast if it's a front state; all
5276		 * others we will not have a tcb for anyway.
5277		 */
5278		goto bad;
5279	}
5280	/* validate SCTP checksum */
5281	check = sh->checksum;	/* save incoming checksum */
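	/*
	 * A zero checksum is accepted without verification only when
	 * sctp_no_csum_on_loopback is set and the packet's source equals its
	 * destination or it arrived over a loopback interface.
	 */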
5282	if ((check == 0) && (sctp_no_csum_on_loopback) &&
5283	    ((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
5284	    (SCTP_IS_IT_LOOPBACK(m)))
5285	    ) {
5286		goto sctp_skip_csum_4;
5287	}
5288	sh->checksum = 0;	/* prepare for calc */
5289	calc_check = sctp_calculate_sum(m, &mlen, iphlen);
5290	if (calc_check != check) {
5291		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
5292		    calc_check, check, m, mlen, iphlen);
5293
5294		stcb = sctp_findassociation_addr(m, iphlen,
5295		    offset - sizeof(*ch),
5296		    sh, ch, &inp, &net,
5297		    vrf_id);
5298		if ((inp) && (stcb)) {
5299			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
5300			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5301		} else if ((inp != NULL) && (stcb == NULL)) {
5302			refcount_up = 1;
5303		}
5304		SCTP_STAT_INCR(sctps_badsum);
5305		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5306		goto bad;
5307	}
5308	sh->checksum = calc_check;
5309sctp_skip_csum_4:
5310	/* destination port of 0 is illegal, based on RFC2960. */
5311	if (sh->dest_port == 0) {
5312		SCTP_STAT_INCR(sctps_hdrops);
5313		goto bad;
5314	}
5315	/* validate mbuf chain length with IP payload length */
5316	if (mlen < (ip->ip_len - iphlen)) {
5317		SCTP_STAT_INCR(sctps_hdrops);
5318		goto bad;
5319	}
5320	/*
5321	 * Locate the pcb and tcb for the datagram; sctp_findassociation_addr()
5322	 * wants the IP/SCTP/first chunk header...
5323	 */
5324	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
5325	    sh, ch, &inp, &net, vrf_id);
5326	/* inp's ref-count increased && stcb locked */
5327	if (inp == NULL) {
5328		struct sctp_init_chunk *init_chk, chunk_buf;
5329
5330		SCTP_STAT_INCR(sctps_noport);
5331#ifdef ICMP_BANDLIM
5332		/*
5333		 * We use bandwidth limiting to protect against sending too
5334		 * many ABORTs all at once. In this case they count the same
5335		 * as an ICMP message.
5336		 */
5337		if (badport_bandlim(0) < 0)
5338			goto bad;
5339#endif				/* ICMP_BANDLIM */
5340		SCTPDBG(SCTP_DEBUG_INPUT1,
5341		    "Sending an ABORT from packet entry!\n");
5342		if (ch->chunk_type == SCTP_INITIATION) {
5343			/*
5344			 * We do a trick here to get the INIT tag: dig in and
5345			 * get the tag from the INIT and put it in the common
5346			 * header.
5347			 */
5348			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
5349			    iphlen + sizeof(*sh), sizeof(*init_chk),
5350			    (uint8_t *) & chunk_buf);
5351			if (init_chk != NULL)
5352				sh->v_tag = init_chk->init.initiate_tag;
5353		}
5354		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5355			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
5356			goto bad;
5357		}
5358		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5359			goto bad;
5360		}
5361		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
5362			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
5363		goto bad;
5364	} else if (stcb == NULL) {
5365		refcount_up = 1;
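		/*
		 * The lookup bumped the inp's ref count but found no stcb;
		 * remember to drop that reference before we return.
		 */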
5366	}
5367#ifdef IPSEC
5368	/*
5369	 * I very much doubt any of the IPSEC stuff will work but I have no
5370	 * idea, so I will leave it in place.
5371	 */
5372	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
5373		ipsec4stat.in_polvio++;
5374		SCTP_STAT_INCR(sctps_hdrops);
5375		goto bad;
5376	}
5377#endif				/* IPSEC */
5378
5379	/*
5380	 * common chunk processing
5381	 */
5382	length = ip->ip_len + iphlen;
5383	offset -= sizeof(struct sctp_chunkhdr);
5384
5385	ecn_bits = ip->ip_tos;
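	/* The ECN bits live in the low-order bits of the IP TOS byte. */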
5386
5387	/* sa_ignore NO_NULL_CHK */
5388	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
5389	    inp, stcb, net, ecn_bits, vrf_id);
5390	/* inp's ref-count reduced && stcb unlocked */
5391	if (m) {
5392		sctp_m_freem(m);
5393	}
5394	if ((inp) && (refcount_up)) {
5395		/* reduce ref-count */
5396		SCTP_INP_DECR_REF(inp);
5397	}
5398	return;
5399bad:
5400	if (stcb) {
5401		SCTP_TCB_UNLOCK(stcb);
5402	}
5403	if ((inp) && (refcount_up)) {
5404		/* reduce ref-count */
5405		SCTP_INP_DECR_REF(inp);
5406	}
5407	if (m) {
5408		sctp_m_freem(m);
5409	}
5410	return;
5411}
5412