sctputil.c revision 332172
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/11/sys/netinet/sctputil.c 332172 2018-04-07 14:41:44Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_bsd_addr.h>
52#if defined(INET6) || defined(INET)
53#include <netinet/tcp_var.h>
54#endif
55#include <netinet/udp.h>
56#include <netinet/udp_var.h>
57#include <sys/proc.h>
58#ifdef INET6
59#include <netinet/icmp6.h>
60#endif
61
62
63#ifndef KTR_SCTP
64#define KTR_SCTP KTR_SUBSYS
65#endif
66
67extern const struct sctp_cc_functions sctp_cc_functions[];
68extern const struct sctp_ss_functions sctp_ss_functions[];
69
70void
71sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72{
73	struct sctp_cwnd_log sctp_clog;
74
75	sctp_clog.x.sb.stcb = stcb;
76	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77	if (stcb)
78		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79	else
80		sctp_clog.x.sb.stcb_sbcc = 0;
81	sctp_clog.x.sb.incr = incr;
82	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83	    SCTP_LOG_EVENT_SB,
84	    from,
85	    sctp_clog.x.misc.log1,
86	    sctp_clog.x.misc.log2,
87	    sctp_clog.x.misc.log3,
88	    sctp_clog.x.misc.log4);
89}
90
91void
92sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93{
94	struct sctp_cwnd_log sctp_clog;
95
96	sctp_clog.x.close.inp = (void *)inp;
97	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98	if (stcb) {
99		sctp_clog.x.close.stcb = (void *)stcb;
100		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101	} else {
102		sctp_clog.x.close.stcb = 0;
103		sctp_clog.x.close.state = 0;
104	}
105	sctp_clog.x.close.loc = loc;
106	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107	    SCTP_LOG_EVENT_CLOSE,
108	    0,
109	    sctp_clog.x.misc.log1,
110	    sctp_clog.x.misc.log2,
111	    sctp_clog.x.misc.log3,
112	    sctp_clog.x.misc.log4);
113}
114
115void
116rto_logging(struct sctp_nets *net, int from)
117{
118	struct sctp_cwnd_log sctp_clog;
119
120	memset(&sctp_clog, 0, sizeof(sctp_clog));
121	sctp_clog.x.rto.net = (void *)net;
122	sctp_clog.x.rto.rtt = net->rtt / 1000;
123	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124	    SCTP_LOG_EVENT_RTT,
125	    from,
126	    sctp_clog.x.misc.log1,
127	    sctp_clog.x.misc.log2,
128	    sctp_clog.x.misc.log3,
129	    sctp_clog.x.misc.log4);
130}
131
132void
133sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134{
135	struct sctp_cwnd_log sctp_clog;
136
137	sctp_clog.x.strlog.stcb = stcb;
138	sctp_clog.x.strlog.n_tsn = tsn;
139	sctp_clog.x.strlog.n_sseq = sseq;
140	sctp_clog.x.strlog.e_tsn = 0;
141	sctp_clog.x.strlog.e_sseq = 0;
142	sctp_clog.x.strlog.strm = stream;
143	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144	    SCTP_LOG_EVENT_STRM,
145	    from,
146	    sctp_clog.x.misc.log1,
147	    sctp_clog.x.misc.log2,
148	    sctp_clog.x.misc.log3,
149	    sctp_clog.x.misc.log4);
150}
151
152void
153sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154{
155	struct sctp_cwnd_log sctp_clog;
156
157	sctp_clog.x.nagle.stcb = (void *)stcb;
158	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163	    SCTP_LOG_EVENT_NAGLE,
164	    action,
165	    sctp_clog.x.misc.log1,
166	    sctp_clog.x.misc.log2,
167	    sctp_clog.x.misc.log3,
168	    sctp_clog.x.misc.log4);
169}
170
171void
172sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173{
174	struct sctp_cwnd_log sctp_clog;
175
176	sctp_clog.x.sack.cumack = cumack;
177	sctp_clog.x.sack.oldcumack = old_cumack;
178	sctp_clog.x.sack.tsn = tsn;
179	sctp_clog.x.sack.numGaps = gaps;
180	sctp_clog.x.sack.numDups = dups;
181	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182	    SCTP_LOG_EVENT_SACK,
183	    from,
184	    sctp_clog.x.misc.log1,
185	    sctp_clog.x.misc.log2,
186	    sctp_clog.x.misc.log3,
187	    sctp_clog.x.misc.log4);
188}
189
190void
191sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192{
193	struct sctp_cwnd_log sctp_clog;
194
195	memset(&sctp_clog, 0, sizeof(sctp_clog));
196	sctp_clog.x.map.base = map;
197	sctp_clog.x.map.cum = cum;
198	sctp_clog.x.map.high = high;
199	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200	    SCTP_LOG_EVENT_MAP,
201	    from,
202	    sctp_clog.x.misc.log1,
203	    sctp_clog.x.misc.log2,
204	    sctp_clog.x.misc.log3,
205	    sctp_clog.x.misc.log4);
206}
207
208void
209sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210{
211	struct sctp_cwnd_log sctp_clog;
212
213	memset(&sctp_clog, 0, sizeof(sctp_clog));
214	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216	sctp_clog.x.fr.tsn = tsn;
217	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218	    SCTP_LOG_EVENT_FR,
219	    from,
220	    sctp_clog.x.misc.log1,
221	    sctp_clog.x.misc.log2,
222	    sctp_clog.x.misc.log3,
223	    sctp_clog.x.misc.log4);
224}
225
226#ifdef SCTP_MBUF_LOGGING
227void
228sctp_log_mb(struct mbuf *m, int from)
229{
230	struct sctp_cwnd_log sctp_clog;
231
232	sctp_clog.x.mb.mp = m;
233	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236	if (SCTP_BUF_IS_EXTENDED(m)) {
237		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239	} else {
240		sctp_clog.x.mb.ext = 0;
241		sctp_clog.x.mb.refcnt = 0;
242	}
243	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244	    SCTP_LOG_EVENT_MBUF,
245	    from,
246	    sctp_clog.x.misc.log1,
247	    sctp_clog.x.misc.log2,
248	    sctp_clog.x.misc.log3,
249	    sctp_clog.x.misc.log4);
250}
251
252void
253sctp_log_mbc(struct mbuf *m, int from)
254{
255	struct mbuf *mat;
256
257	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258		sctp_log_mb(mat, from);
259	}
260}
261#endif
262
263void
264sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265{
266	struct sctp_cwnd_log sctp_clog;
267
268	if (control == NULL) {
269		SCTP_PRINTF("Gak log of NULL?\n");
270		return;
271	}
272	sctp_clog.x.strlog.stcb = control->stcb;
273	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275	sctp_clog.x.strlog.strm = control->sinfo_stream;
276	if (poschk != NULL) {
277		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279	} else {
280		sctp_clog.x.strlog.e_tsn = 0;
281		sctp_clog.x.strlog.e_sseq = 0;
282	}
283	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284	    SCTP_LOG_EVENT_STRM,
285	    from,
286	    sctp_clog.x.misc.log1,
287	    sctp_clog.x.misc.log2,
288	    sctp_clog.x.misc.log3,
289	    sctp_clog.x.misc.log4);
290}
291
292void
293sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294{
295	struct sctp_cwnd_log sctp_clog;
296
297	sctp_clog.x.cwnd.net = net;
298	if (stcb->asoc.send_queue_cnt > 255)
299		sctp_clog.x.cwnd.cnt_in_send = 255;
300	else
301		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302	if (stcb->asoc.stream_queue_cnt > 255)
303		sctp_clog.x.cwnd.cnt_in_str = 255;
304	else
305		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306
307	if (net) {
308		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309		sctp_clog.x.cwnd.inflight = net->flight_size;
310		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313	}
314	if (SCTP_CWNDLOG_PRESEND == from) {
315		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316	}
317	sctp_clog.x.cwnd.cwnd_augment = augment;
318	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319	    SCTP_LOG_EVENT_CWND,
320	    from,
321	    sctp_clog.x.misc.log1,
322	    sctp_clog.x.misc.log2,
323	    sctp_clog.x.misc.log3,
324	    sctp_clog.x.misc.log4);
325}
326
327void
328sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
329{
330	struct sctp_cwnd_log sctp_clog;
331
332	memset(&sctp_clog, 0, sizeof(sctp_clog));
333	if (inp) {
334		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
335
336	} else {
337		sctp_clog.x.lock.sock = (void *)NULL;
338	}
339	sctp_clog.x.lock.inp = (void *)inp;
340	if (stcb) {
341		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
342	} else {
343		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
344	}
345	if (inp) {
346		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
347		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
348	} else {
349		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
350		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
351	}
352	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
353	if (inp && (inp->sctp_socket)) {
354		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
355		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
356		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
357	} else {
358		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
359		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
360		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
361	}
362	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
363	    SCTP_LOG_LOCK_EVENT,
364	    from,
365	    sctp_clog.x.misc.log1,
366	    sctp_clog.x.misc.log2,
367	    sctp_clog.x.misc.log3,
368	    sctp_clog.x.misc.log4);
369}
370
371void
372sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373{
374	struct sctp_cwnd_log sctp_clog;
375
376	memset(&sctp_clog, 0, sizeof(sctp_clog));
377	sctp_clog.x.cwnd.net = net;
378	sctp_clog.x.cwnd.cwnd_new_value = error;
379	sctp_clog.x.cwnd.inflight = net->flight_size;
380	sctp_clog.x.cwnd.cwnd_augment = burst;
381	if (stcb->asoc.send_queue_cnt > 255)
382		sctp_clog.x.cwnd.cnt_in_send = 255;
383	else
384		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385	if (stcb->asoc.stream_queue_cnt > 255)
386		sctp_clog.x.cwnd.cnt_in_str = 255;
387	else
388		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390	    SCTP_LOG_EVENT_MAXBURST,
391	    from,
392	    sctp_clog.x.misc.log1,
393	    sctp_clog.x.misc.log2,
394	    sctp_clog.x.misc.log3,
395	    sctp_clog.x.misc.log4);
396}
397
398void
399sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400{
401	struct sctp_cwnd_log sctp_clog;
402
403	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404	sctp_clog.x.rwnd.send_size = snd_size;
405	sctp_clog.x.rwnd.overhead = overhead;
406	sctp_clog.x.rwnd.new_rwnd = 0;
407	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408	    SCTP_LOG_EVENT_RWND,
409	    from,
410	    sctp_clog.x.misc.log1,
411	    sctp_clog.x.misc.log2,
412	    sctp_clog.x.misc.log3,
413	    sctp_clog.x.misc.log4);
414}
415
416void
417sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418{
419	struct sctp_cwnd_log sctp_clog;
420
421	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422	sctp_clog.x.rwnd.send_size = flight_size;
423	sctp_clog.x.rwnd.overhead = overhead;
424	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426	    SCTP_LOG_EVENT_RWND,
427	    from,
428	    sctp_clog.x.misc.log1,
429	    sctp_clog.x.misc.log2,
430	    sctp_clog.x.misc.log3,
431	    sctp_clog.x.misc.log4);
432}
433
434#ifdef SCTP_MBCNT_LOGGING
435static void
436sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437{
438	struct sctp_cwnd_log sctp_clog;
439
440	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441	sctp_clog.x.mbcnt.size_change = book;
442	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445	    SCTP_LOG_EVENT_MBCNT,
446	    from,
447	    sctp_clog.x.misc.log1,
448	    sctp_clog.x.misc.log2,
449	    sctp_clog.x.misc.log3,
450	    sctp_clog.x.misc.log4);
451}
452#endif
453
454void
455sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
456{
457	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458	    SCTP_LOG_MISC_EVENT,
459	    from,
460	    a, b, c, d);
461}
462
463void
464sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465{
466	struct sctp_cwnd_log sctp_clog;
467
468	sctp_clog.x.wake.stcb = (void *)stcb;
469	sctp_clog.x.wake.wake_cnt = wake_cnt;
470	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473
474	if (stcb->asoc.stream_queue_cnt < 0xff)
475		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476	else
477		sctp_clog.x.wake.stream_qcnt = 0xff;
478
479	if (stcb->asoc.chunks_on_out_queue < 0xff)
480		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481	else
482		sctp_clog.x.wake.chunks_on_oque = 0xff;
483
484	sctp_clog.x.wake.sctpflags = 0;
485	/* set in the deferred mode code */
486	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487		sctp_clog.x.wake.sctpflags |= 1;
488	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489		sctp_clog.x.wake.sctpflags |= 2;
490	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491		sctp_clog.x.wake.sctpflags |= 4;
492	/* what about the sb */
493	if (stcb->sctp_socket) {
494		struct socket *so = stcb->sctp_socket;
495
496		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497	} else {
498		sctp_clog.x.wake.sbflags = 0xff;
499	}
500	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501	    SCTP_LOG_EVENT_WAKE,
502	    from,
503	    sctp_clog.x.misc.log1,
504	    sctp_clog.x.misc.log2,
505	    sctp_clog.x.misc.log3,
506	    sctp_clog.x.misc.log4);
507}
508
509void
510sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511{
512	struct sctp_cwnd_log sctp_clog;
513
514	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522	    SCTP_LOG_EVENT_BLOCK,
523	    from,
524	    sctp_clog.x.misc.log1,
525	    sctp_clog.x.misc.log2,
526	    sctp_clog.x.misc.log3,
527	    sctp_clog.x.misc.log4);
528}
529
530int
531sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
532{
533	/* May need to fix this if ktrdump does not work */
534	return (0);
535}
536
537#ifdef SCTP_AUDITING_ENABLED
538uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539static int sctp_audit_indx = 0;
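
/*
 * sctp_audit_data is a fixed-size ring of (tag, value) byte pairs used for
 * debugging; sctp_audit_indx always points at the next slot to write and
 * wraps around at SCTP_AUDIT_SIZE.
 */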
540
541static
542void
543sctp_print_audit_report(void)
544{
545	int i;
546	int cnt;
547
548	cnt = 0;
549	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550		if ((sctp_audit_data[i][0] == 0xe0) &&
551		    (sctp_audit_data[i][1] == 0x01)) {
552			cnt = 0;
553			SCTP_PRINTF("\n");
554		} else if (sctp_audit_data[i][0] == 0xf0) {
555			cnt = 0;
556			SCTP_PRINTF("\n");
557		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558		    (sctp_audit_data[i][1] == 0x01)) {
559			SCTP_PRINTF("\n");
560			cnt = 0;
561		}
562		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563		    (uint32_t)sctp_audit_data[i][1]);
564		cnt++;
565		if ((cnt % 14) == 0)
566			SCTP_PRINTF("\n");
567	}
568	for (i = 0; i < sctp_audit_indx; i++) {
569		if ((sctp_audit_data[i][0] == 0xe0) &&
570		    (sctp_audit_data[i][1] == 0x01)) {
571			cnt = 0;
572			SCTP_PRINTF("\n");
573		} else if (sctp_audit_data[i][0] == 0xf0) {
574			cnt = 0;
575			SCTP_PRINTF("\n");
576		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577		    (sctp_audit_data[i][1] == 0x01)) {
578			SCTP_PRINTF("\n");
579			cnt = 0;
580		}
581		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582		    (uint32_t)sctp_audit_data[i][1]);
583		cnt++;
584		if ((cnt % 14) == 0)
585			SCTP_PRINTF("\n");
586	}
587	SCTP_PRINTF("\n");
588}
589
590void
591sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
592    struct sctp_nets *net)
593{
594	int resend_cnt, tot_out, rep, tot_book_cnt;
595	struct sctp_nets *lnet;
596	struct sctp_tmit_chunk *chk;
597
598	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
599	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
600	sctp_audit_indx++;
601	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602		sctp_audit_indx = 0;
603	}
604	if (inp == NULL) {
605		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
606		sctp_audit_data[sctp_audit_indx][1] = 0x01;
607		sctp_audit_indx++;
608		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609			sctp_audit_indx = 0;
610		}
611		return;
612	}
613	if (stcb == NULL) {
614		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
615		sctp_audit_data[sctp_audit_indx][1] = 0x02;
616		sctp_audit_indx++;
617		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
618			sctp_audit_indx = 0;
619		}
620		return;
621	}
622	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
623	sctp_audit_data[sctp_audit_indx][1] =
624	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
625	sctp_audit_indx++;
626	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627		sctp_audit_indx = 0;
628	}
629	rep = 0;
630	tot_book_cnt = 0;
631	resend_cnt = tot_out = 0;
632	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
633		if (chk->sent == SCTP_DATAGRAM_RESEND) {
634			resend_cnt++;
635		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
636			tot_out += chk->book_size;
637			tot_book_cnt++;
638		}
639	}
640	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
641		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
643		sctp_audit_indx++;
644		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645			sctp_audit_indx = 0;
646		}
647		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
648		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
649		rep = 1;
650		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
651		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
652		sctp_audit_data[sctp_audit_indx][1] =
653		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
654		sctp_audit_indx++;
655		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656			sctp_audit_indx = 0;
657		}
658	}
659	if (tot_out != stcb->asoc.total_flight) {
660		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
661		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
662		sctp_audit_indx++;
663		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
664			sctp_audit_indx = 0;
665		}
666		rep = 1;
667		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
668		    (int)stcb->asoc.total_flight);
669		stcb->asoc.total_flight = tot_out;
670	}
671	if (tot_book_cnt != stcb->asoc.total_flight_count) {
672		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
673		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
674		sctp_audit_indx++;
675		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676			sctp_audit_indx = 0;
677		}
678		rep = 1;
679		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
680
681		stcb->asoc.total_flight_count = tot_book_cnt;
682	}
683	tot_out = 0;
684	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
685		tot_out += lnet->flight_size;
686	}
687	if (tot_out != stcb->asoc.total_flight) {
688		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
689		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
690		sctp_audit_indx++;
691		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692			sctp_audit_indx = 0;
693		}
694		rep = 1;
695		SCTP_PRINTF("real flight:%d net total was %d\n",
696		    stcb->asoc.total_flight, tot_out);
697		/* now corrective action */
698		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
699
700			tot_out = 0;
701			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
702				if ((chk->whoTo == lnet) &&
703				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
704					tot_out += chk->book_size;
705				}
706			}
707			if (lnet->flight_size != tot_out) {
708				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
709				    (void *)lnet, lnet->flight_size,
710				    tot_out);
711				lnet->flight_size = tot_out;
712			}
713		}
714	}
715	if (rep) {
716		sctp_print_audit_report();
717	}
718}
719
720void
721sctp_audit_log(uint8_t ev, uint8_t fd)
722{
723
724	sctp_audit_data[sctp_audit_indx][0] = ev;
725	sctp_audit_data[sctp_audit_indx][1] = fd;
726	sctp_audit_indx++;
727	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728		sctp_audit_indx = 0;
729	}
730}
731
732#endif
733
734/*
735 * sctp_stop_timers_for_shutdown() should be called
736 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737 * state to make sure that all timers are stopped.
738 */
739void
740sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741{
742	struct sctp_association *asoc;
743	struct sctp_nets *net;
744
745	asoc = &stcb->asoc;
746
747	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755	}
756}
757
758/*
759 * A list of sizes based on typical MTUs; used only if the next hop size
760 * is not returned.
761 */
762static uint32_t sctp_mtu_sizes[] = {
763	68,
764	296,
765	508,
766	512,
767	544,
768	576,
769	1006,
770	1492,
771	1500,
772	1536,
773	2002,
774	2048,
775	4352,
776	4464,
777	8166,
778	17914,
779	32000,
780	65535
781};
782
783/*
784 * Return the largest MTU smaller than val. If there is no
785 * entry, just return val.
786 */
787uint32_t
788sctp_get_prev_mtu(uint32_t val)
789{
790	uint32_t i;
791
792	if (val <= sctp_mtu_sizes[0]) {
793		return (val);
794	}
795	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796		if (val <= sctp_mtu_sizes[i]) {
797			break;
798		}
799	}
800	return (sctp_mtu_sizes[i - 1]);
801}
802
803/*
804 * Return the smallest MTU larger than val. If there is no
805 * entry, just return val.
806 */
807uint32_t
808sctp_get_next_mtu(uint32_t val)
809{
810	/* select another MTU that is just bigger than this one */
811	uint32_t i;
812
813	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
814		if (val < sctp_mtu_sizes[i]) {
815			return (sctp_mtu_sizes[i]);
816		}
817	}
818	return (val);
819}
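
/*
 * Illustrative usage sketch for the MTU table lookups above (an addition
 * for clarity, not part of the original file); kept under #if 0 so it is
 * never compiled.
 */
#if 0
static void
sctp_mtu_table_example(void)
{
	uint32_t prev, next;

	/* 1400 falls between the 1006 and 1492 entries of sctp_mtu_sizes. */
	prev = sctp_get_prev_mtu(1400);		/* 1006 */
	next = sctp_get_next_mtu(1400);		/* 1492 */
	/* Values outside the table are returned unchanged. */
	prev = sctp_get_prev_mtu(60);		/* 60, at or below the first entry */
	next = sctp_get_next_mtu(70000);	/* 70000, above the last entry */
	(void)prev;
	(void)next;
}
#endif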
820
821void
822sctp_fill_random_store(struct sctp_pcb *m)
823{
824	/*
825	 * Here we use MD5/SHA-1 to hash our good random numbers together with
826	 * our counter. The result becomes our new set of good random numbers,
827	 * which we then set up to hand out. Note that we do no locking to
828	 * protect this. That is fine, since competing callers only stir more
829	 * gobbledygook into the random store, which is what we want. There is
830	 * a chance that two callers use the same random numbers, but that is
831	 * acceptable too, since those numbers are random as well :->
832	 */
833	m->store_at = 0;
834	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
835	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
836	    sizeof(m->random_counter), (uint8_t *)m->random_store);
837	m->random_counter++;
838}
839
840uint32_t
841sctp_select_initial_TSN(struct sctp_pcb *inp)
842{
843	/*
844	 * A true implementation should use a random selection process to get
845	 * the initial stream sequence number, using RFC 1750 as a good
846	 * guideline.
847	 */
848	uint32_t x, *xp;
849	uint8_t *p;
850	int store_at, new_store;
851
852	if (inp->initial_sequence_debug != 0) {
853		uint32_t ret;
854
855		ret = inp->initial_sequence_debug;
856		inp->initial_sequence_debug++;
857		return (ret);
858	}
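	/*
	 * Reserve sizeof(uint32_t) bytes of the pre-computed random store by
	 * advancing store_at with a compare-and-swap; if another CPU raced us
	 * and moved store_at first, retry. When the reservation reaches the
	 * end of the store, store_at wraps to 0 and the store is refilled
	 * with fresh random data.
	 */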
859retry:
860	store_at = inp->store_at;
861	new_store = store_at + sizeof(uint32_t);
862	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
863		new_store = 0;
864	}
865	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
866		goto retry;
867	}
868	if (new_store == 0) {
869		/* Refill the random store */
870		sctp_fill_random_store(inp);
871	}
872	p = &inp->random_store[store_at];
873	xp = (uint32_t *)p;
874	x = *xp;
875	return (x);
876}
877
878uint32_t
879sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880{
881	uint32_t x;
882	struct timeval now;
883
884	if (check) {
885		(void)SCTP_GETTIME_TIMEVAL(&now);
886	}
887	for (;;) {
888		x = sctp_select_initial_TSN(&inp->sctp_ep);
889		if (x == 0) {
890			/* we never use 0 */
891			continue;
892		}
893		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894			break;
895		}
896	}
897	return (x);
898}
899
900int32_t
901sctp_map_assoc_state(int kernel_state)
902{
903	int32_t user_state;
904
905	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906		user_state = SCTP_CLOSED;
907	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908		user_state = SCTP_SHUTDOWN_PENDING;
909	} else {
910		switch (kernel_state & SCTP_STATE_MASK) {
911		case SCTP_STATE_EMPTY:
912			user_state = SCTP_CLOSED;
913			break;
914		case SCTP_STATE_INUSE:
915			user_state = SCTP_CLOSED;
916			break;
917		case SCTP_STATE_COOKIE_WAIT:
918			user_state = SCTP_COOKIE_WAIT;
919			break;
920		case SCTP_STATE_COOKIE_ECHOED:
921			user_state = SCTP_COOKIE_ECHOED;
922			break;
923		case SCTP_STATE_OPEN:
924			user_state = SCTP_ESTABLISHED;
925			break;
926		case SCTP_STATE_SHUTDOWN_SENT:
927			user_state = SCTP_SHUTDOWN_SENT;
928			break;
929		case SCTP_STATE_SHUTDOWN_RECEIVED:
930			user_state = SCTP_SHUTDOWN_RECEIVED;
931			break;
932		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933			user_state = SCTP_SHUTDOWN_ACK_SENT;
934			break;
935		default:
936			user_state = SCTP_CLOSED;
937			break;
938		}
939	}
940	return (user_state);
941}
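
/*
 * Illustrative sketch of the kernel-to-user state mapping above (an
 * addition for clarity, not part of the original file); kept under #if 0
 * so it is never compiled.
 */
#if 0
static void
sctp_map_assoc_state_example(void)
{
	int32_t a, b;

	/* An established association is reported as SCTP_ESTABLISHED. */
	a = sctp_map_assoc_state(SCTP_STATE_OPEN);
	/* Once aborted, it is reported as SCTP_CLOSED regardless of the base state. */
	b = sctp_map_assoc_state(SCTP_STATE_OPEN | SCTP_STATE_WAS_ABORTED);
	(void)a;	/* == SCTP_ESTABLISHED */
	(void)b;	/* == SCTP_CLOSED */
}
#endif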
942
943int
944sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946{
947	struct sctp_association *asoc;
948
949	/*
950	 * Anything set to zero is taken care of by the allocation routine's
951	 * bzero
952	 */
953
954	/*
955	 * Up front, select what scoping to apply to the addresses I tell my
956	 * peer. Not sure what to do with these right now; we will need to
957	 * come up with a way to set them. We may need to pass them through
958	 * from the caller in the sctp_aloc_assoc() function.
959	 */
960	int i;
961#if defined(SCTP_DETAILED_STR_STATS)
962	int j;
963#endif
964
965	asoc = &stcb->asoc;
966	/* init all variables to a known value. */
967	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968	asoc->max_burst = inp->sctp_ep.max_burst;
969	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973	asoc->ecn_supported = inp->ecn_supported;
974	asoc->prsctp_supported = inp->prsctp_supported;
975	asoc->idata_supported = inp->idata_supported;
976	asoc->auth_supported = inp->auth_supported;
977	asoc->asconf_supported = inp->asconf_supported;
978	asoc->reconfig_supported = inp->reconfig_supported;
979	asoc->nrsack_supported = inp->nrsack_supported;
980	asoc->pktdrop_supported = inp->pktdrop_supported;
981	asoc->idata_supported = inp->idata_supported;
982	asoc->sctp_cmt_pf = (uint8_t)0;
983	asoc->sctp_frag_point = inp->sctp_frag_point;
984	asoc->sctp_features = inp->sctp_features;
985	asoc->default_dscp = inp->sctp_ep.default_dscp;
986	asoc->max_cwnd = inp->max_cwnd;
987#ifdef INET6
988	if (inp->sctp_ep.default_flowlabel) {
989		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990	} else {
991		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993			asoc->default_flowlabel &= 0x000fffff;
994			asoc->default_flowlabel |= 0x80000000;
995		} else {
996			asoc->default_flowlabel = 0;
997		}
998	}
999#endif
1000	asoc->sb_send_resv = 0;
1001	if (override_tag) {
1002		asoc->my_vtag = override_tag;
1003	} else {
1004		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005	}
1006	/* Get the nonce tags */
1007	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009	asoc->vrf_id = vrf_id;
1010
1011#ifdef SCTP_ASOCLOG_OF_TSNS
1012	asoc->tsn_in_at = 0;
1013	asoc->tsn_out_at = 0;
1014	asoc->tsn_in_wrapped = 0;
1015	asoc->tsn_out_wrapped = 0;
1016	asoc->cumack_log_at = 0;
1017	asoc->cumack_log_atsnt = 0;
1018#endif
1019#ifdef SCTP_FS_SPEC_LOG
1020	asoc->fs_index = 0;
1021#endif
1022	asoc->refcnt = 0;
1023	asoc->assoc_up_sent = 0;
1024	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025	    sctp_select_initial_TSN(&inp->sctp_ep);
1026	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027	/* we are optimistic here */
1028	asoc->peer_supports_nat = 0;
1029	asoc->sent_queue_retran_cnt = 0;
1030
1031	/* for CMT */
1032	asoc->last_net_cmt_send_started = NULL;
1033
1034	/* This will need to be adjusted */
1035	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037	asoc->asconf_seq_in = asoc->last_acked_seq;
1038
1039	/* here we are different, we hold the next one we expect */
1040	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041
1042	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044
1045	asoc->max_init_times = inp->sctp_ep.max_init_times;
1046	asoc->max_send_times = inp->sctp_ep.max_send_times;
1047	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1048	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1049	asoc->free_chunk_cnt = 0;
1050
1051	asoc->iam_blocking = 0;
1052	asoc->context = inp->sctp_context;
1053	asoc->local_strreset_support = inp->local_strreset_support;
1054	asoc->def_send = inp->def_send;
1055	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1056	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1057	asoc->pr_sctp_cnt = 0;
1058	asoc->total_output_queue_size = 0;
1059
1060	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1061		asoc->scope.ipv6_addr_legal = 1;
1062		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1063			asoc->scope.ipv4_addr_legal = 1;
1064		} else {
1065			asoc->scope.ipv4_addr_legal = 0;
1066		}
1067	} else {
1068		asoc->scope.ipv6_addr_legal = 0;
1069		asoc->scope.ipv4_addr_legal = 1;
1070	}
1071
1072	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1073	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1074
1075	asoc->smallest_mtu = inp->sctp_frag_point;
1076	asoc->minrto = inp->sctp_ep.sctp_minrto;
1077	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1078
1079	asoc->stream_locked_on = 0;
1080	asoc->ecn_echo_cnt_onq = 0;
1081	asoc->stream_locked = 0;
1082
1083	asoc->send_sack = 1;
1084
1085	LIST_INIT(&asoc->sctp_restricted_addrs);
1086
1087	TAILQ_INIT(&asoc->nets);
1088	TAILQ_INIT(&asoc->pending_reply_queue);
1089	TAILQ_INIT(&asoc->asconf_ack_sent);
1090	/* Setup to fill the hb random cache at first HB */
1091	asoc->hb_random_idx = 4;
1092
1093	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1094
1095	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1096	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1097
1098	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1099	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1100
1101	/*
1102	 * Now the stream parameters, here we allocate space for all streams
1103	 * that we request by default.
1104	 */
1105	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106	    o_strms;
1107	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109	    SCTP_M_STRMO);
1110	if (asoc->strmout == NULL) {
1111		/* big trouble no memory */
1112		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113		return (ENOMEM);
1114	}
1115	for (i = 0; i < asoc->streamoutcnt; i++) {
1116		/*
1117		 * The inbound side must be set to 0xffff. Also NOTE: when we
1118		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1119		 * the count (streamoutcnt), but first check whether we sent on
1120		 * any of the upper streams that were dropped (if some were).
1121		 * Anything sent on a dropped stream must be reported to the
1122		 * upper layer as failed to send.
1123		 */
1124		asoc->strmout[i].next_mid_ordered = 0;
1125		asoc->strmout[i].next_mid_unordered = 0;
1126		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127		asoc->strmout[i].chunks_on_queues = 0;
1128#if defined(SCTP_DETAILED_STR_STATS)
1129		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1130			asoc->strmout[i].abandoned_sent[j] = 0;
1131			asoc->strmout[i].abandoned_unsent[j] = 0;
1132		}
1133#else
1134		asoc->strmout[i].abandoned_sent[0] = 0;
1135		asoc->strmout[i].abandoned_unsent[0] = 0;
1136#endif
1137		asoc->strmout[i].sid = i;
1138		asoc->strmout[i].last_msg_incomplete = 0;
1139		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1140		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1141	}
1142	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1143
1144	/* Now the mapping array */
1145	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1146	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1147	    SCTP_M_MAP);
1148	if (asoc->mapping_array == NULL) {
1149		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1150		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1151		return (ENOMEM);
1152	}
1153	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1154	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1155	    SCTP_M_MAP);
1156	if (asoc->nr_mapping_array == NULL) {
1157		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1158		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1159		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1160		return (ENOMEM);
1161	}
1162	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1163
1164	/* Now the init of the other outqueues */
1165	TAILQ_INIT(&asoc->free_chunks);
1166	TAILQ_INIT(&asoc->control_send_queue);
1167	TAILQ_INIT(&asoc->asconf_send_queue);
1168	TAILQ_INIT(&asoc->send_queue);
1169	TAILQ_INIT(&asoc->sent_queue);
1170	TAILQ_INIT(&asoc->resetHead);
1171	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1172	TAILQ_INIT(&asoc->asconf_queue);
1173	/* authentication fields */
1174	asoc->authinfo.random = NULL;
1175	asoc->authinfo.active_keyid = 0;
1176	asoc->authinfo.assoc_key = NULL;
1177	asoc->authinfo.assoc_keyid = 0;
1178	asoc->authinfo.recv_key = NULL;
1179	asoc->authinfo.recv_keyid = 0;
1180	LIST_INIT(&asoc->shared_keys);
1181	asoc->marked_retrans = 0;
1182	asoc->port = inp->sctp_ep.port;
1183	asoc->timoinit = 0;
1184	asoc->timodata = 0;
1185	asoc->timosack = 0;
1186	asoc->timoshutdown = 0;
1187	asoc->timoheartbeat = 0;
1188	asoc->timocookie = 0;
1189	asoc->timoshutdownack = 0;
1190	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1191	asoc->discontinuity_time = asoc->start_time;
1192	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1193		asoc->abandoned_unsent[i] = 0;
1194		asoc->abandoned_sent[i] = 0;
1195	}
1196	/*
1197	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1198	 * freed later when the association is freed.
1199	 */
1200	return (0);
1201}
1202
1203void
1204sctp_print_mapping_array(struct sctp_association *asoc)
1205{
1206	unsigned int i, limit;
1207
1208	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1209	    asoc->mapping_array_size,
1210	    asoc->mapping_array_base_tsn,
1211	    asoc->cumulative_tsn,
1212	    asoc->highest_tsn_inside_map,
1213	    asoc->highest_tsn_inside_nr_map);
1214	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1215		if (asoc->mapping_array[limit - 1] != 0) {
1216			break;
1217		}
1218	}
1219	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1220	for (i = 0; i < limit; i++) {
1221		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1222	}
1223	if (limit % 16)
1224		SCTP_PRINTF("\n");
1225	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1226		if (asoc->nr_mapping_array[limit - 1]) {
1227			break;
1228		}
1229	}
1230	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1231	for (i = 0; i < limit; i++) {
1232		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1233	}
1234	if (limit % 16)
1235		SCTP_PRINTF("\n");
1236}
1237
1238int
1239sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1240{
1241	/* mapping array needs to grow */
1242	uint8_t *new_array1, *new_array2;
1243	uint32_t new_size;
1244
1245	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1246	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1247	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1248	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1249		/* can't get more, forget it */
1250		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1251		if (new_array1) {
1252			SCTP_FREE(new_array1, SCTP_M_MAP);
1253		}
1254		if (new_array2) {
1255			SCTP_FREE(new_array2, SCTP_M_MAP);
1256		}
1257		return (-1);
1258	}
1259	memset(new_array1, 0, new_size);
1260	memset(new_array2, 0, new_size);
1261	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1262	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1263	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1264	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1265	asoc->mapping_array = new_array1;
1266	asoc->nr_mapping_array = new_array2;
1267	asoc->mapping_array_size = new_size;
1268	return (0);
1269}
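
/*
 * Worked example for the growth arithmetic above (an addition for clarity,
 * not part of the original file): if a TSN arrives that needs 100 more bits
 * of map coverage, new_size grows by (100 + 7) / 8 = 13 bytes plus
 * SCTP_MAPPING_ARRAY_INCR bytes of slack, and both the renegable and
 * non-renegable maps are reallocated to the same new size.
 */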
1270
1271
1272static void
1273sctp_iterator_work(struct sctp_iterator *it)
1274{
1275	int iteration_count = 0;
1276	int inp_skip = 0;
1277	int first_in = 1;
1278	struct sctp_inpcb *tinp;
1279
1280	SCTP_INP_INFO_RLOCK();
1281	SCTP_ITERATOR_LOCK();
1282	sctp_it_ctl.cur_it = it;
1283	if (it->inp) {
1284		SCTP_INP_RLOCK(it->inp);
1285		SCTP_INP_DECR_REF(it->inp);
1286	}
1287	if (it->inp == NULL) {
1288		/* iterator is complete */
1289done_with_iterator:
1290		sctp_it_ctl.cur_it = NULL;
1291		SCTP_ITERATOR_UNLOCK();
1292		SCTP_INP_INFO_RUNLOCK();
1293		if (it->function_atend != NULL) {
1294			(*it->function_atend) (it->pointer, it->val);
1295		}
1296		SCTP_FREE(it, SCTP_M_ITER);
1297		return;
1298	}
1299select_a_new_ep:
1300	if (first_in) {
1301		first_in = 0;
1302	} else {
1303		SCTP_INP_RLOCK(it->inp);
1304	}
1305	while (((it->pcb_flags) &&
1306	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1307	    ((it->pcb_features) &&
1308	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1309		/* endpoint flags or features don't match, so keep looking */
1310		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1311			SCTP_INP_RUNLOCK(it->inp);
1312			goto done_with_iterator;
1313		}
1314		tinp = it->inp;
1315		it->inp = LIST_NEXT(it->inp, sctp_list);
1316		SCTP_INP_RUNLOCK(tinp);
1317		if (it->inp == NULL) {
1318			goto done_with_iterator;
1319		}
1320		SCTP_INP_RLOCK(it->inp);
1321	}
1322	/* now go through each assoc which is in the desired state */
1323	if (it->done_current_ep == 0) {
1324		if (it->function_inp != NULL)
1325			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1326		it->done_current_ep = 1;
1327	}
1328	if (it->stcb == NULL) {
1329		/* run the per instance function */
1330		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1331	}
1332	if ((inp_skip) || it->stcb == NULL) {
1333		if (it->function_inp_end != NULL) {
1334			inp_skip = (*it->function_inp_end) (it->inp,
1335			    it->pointer,
1336			    it->val);
1337		}
1338		SCTP_INP_RUNLOCK(it->inp);
1339		goto no_stcb;
1340	}
1341	while (it->stcb) {
1342		SCTP_TCB_LOCK(it->stcb);
1343		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1344			/* not in the right state... keep looking */
1345			SCTP_TCB_UNLOCK(it->stcb);
1346			goto next_assoc;
1347		}
1348		/* see if we have limited out the iterator loop */
1349		iteration_count++;
1350		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1351			/* Pause to let others grab the lock */
1352			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1353			SCTP_TCB_UNLOCK(it->stcb);
1354			SCTP_INP_INCR_REF(it->inp);
1355			SCTP_INP_RUNLOCK(it->inp);
1356			SCTP_ITERATOR_UNLOCK();
1357			SCTP_INP_INFO_RUNLOCK();
1358			SCTP_INP_INFO_RLOCK();
1359			SCTP_ITERATOR_LOCK();
1360			if (sctp_it_ctl.iterator_flags) {
1361				/* We won't be staying here */
1362				SCTP_INP_DECR_REF(it->inp);
1363				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1364				if (sctp_it_ctl.iterator_flags &
1365				    SCTP_ITERATOR_STOP_CUR_IT) {
1366					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1367					goto done_with_iterator;
1368				}
1369				if (sctp_it_ctl.iterator_flags &
1370				    SCTP_ITERATOR_STOP_CUR_INP) {
1371					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1372					goto no_stcb;
1373				}
1374				/* If we reach here huh? */
1375				SCTP_PRINTF("Unknown it ctl flag %x\n",
1376				    sctp_it_ctl.iterator_flags);
1377				sctp_it_ctl.iterator_flags = 0;
1378			}
1379			SCTP_INP_RLOCK(it->inp);
1380			SCTP_INP_DECR_REF(it->inp);
1381			SCTP_TCB_LOCK(it->stcb);
1382			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1383			iteration_count = 0;
1384		}
1385		/* run function on this one */
1386		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1387
1388		/*
1389		 * we lie here, it really needs to have its own type, but
1390		 * first I must verify that this won't affect things :-0
1391		 */
1392		if (it->no_chunk_output == 0)
1393			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1394
1395		SCTP_TCB_UNLOCK(it->stcb);
1396next_assoc:
1397		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1398		if (it->stcb == NULL) {
1399			/* Run last function */
1400			if (it->function_inp_end != NULL) {
1401				inp_skip = (*it->function_inp_end) (it->inp,
1402				    it->pointer,
1403				    it->val);
1404			}
1405		}
1406	}
1407	SCTP_INP_RUNLOCK(it->inp);
1408no_stcb:
1409	/* done with all assocs on this endpoint, move on to next endpoint */
1410	it->done_current_ep = 0;
1411	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1412		it->inp = NULL;
1413	} else {
1414		it->inp = LIST_NEXT(it->inp, sctp_list);
1415	}
1416	if (it->inp == NULL) {
1417		goto done_with_iterator;
1418	}
1419	goto select_a_new_ep;
1420}
1421
1422void
1423sctp_iterator_worker(void)
1424{
1425	struct sctp_iterator *it, *nit;
1426
1427	/* This function is called with the WQ lock in place */
1428
1429	sctp_it_ctl.iterator_running = 1;
1430	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431		/* now let's work on this one */
1432		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434		CURVNET_SET(it->vn);
1435		sctp_iterator_work(it);
1436		CURVNET_RESTORE();
1437		SCTP_IPI_ITERATOR_WQ_LOCK();
1438		/* sa_ignore FREED_MEMORY */
1439	}
1440	sctp_it_ctl.iterator_running = 0;
1441	return;
1442}
1443
1444
1445static void
1446sctp_handle_addr_wq(void)
1447{
1448	/* deal with the ADDR wq from the rtsock calls */
1449	struct sctp_laddr *wi, *nwi;
1450	struct sctp_asconf_iterator *asc;
1451
1452	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1453	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1454	if (asc == NULL) {
1455		/* Try later, no memory */
1456		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1457		    (struct sctp_inpcb *)NULL,
1458		    (struct sctp_tcb *)NULL,
1459		    (struct sctp_nets *)NULL);
1460		return;
1461	}
1462	LIST_INIT(&asc->list_of_work);
1463	asc->cnt = 0;
1464
1465	SCTP_WQ_ADDR_LOCK();
1466	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1467		LIST_REMOVE(wi, sctp_nxt_addr);
1468		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1469		asc->cnt++;
1470	}
1471	SCTP_WQ_ADDR_UNLOCK();
1472
1473	if (asc->cnt == 0) {
1474		SCTP_FREE(asc, SCTP_M_ASC_IT);
1475	} else {
1476		int ret;
1477
1478		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1479		    sctp_asconf_iterator_stcb,
1480		    NULL,	/* No ep end for boundall */
1481		    SCTP_PCB_FLAGS_BOUNDALL,
1482		    SCTP_PCB_ANY_FEATURES,
1483		    SCTP_ASOC_ANY_STATE,
1484		    (void *)asc, 0,
1485		    sctp_asconf_iterator_end, NULL, 0);
1486		if (ret) {
1487			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1488			/*
1489			 * Free it if we are stopping, or else put the work
1490			 * back on the addr_wq.
1491			 */
1492			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1493				sctp_asconf_iterator_end(asc, 0);
1494			} else {
1495				SCTP_WQ_ADDR_LOCK();
1496				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1497					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1498				}
1499				SCTP_WQ_ADDR_UNLOCK();
1500				SCTP_FREE(asc, SCTP_M_ASC_IT);
1501			}
1502		}
1503	}
1504}
1505
1506void
1507sctp_timeout_handler(void *t)
1508{
1509	struct sctp_inpcb *inp;
1510	struct sctp_tcb *stcb;
1511	struct sctp_nets *net;
1512	struct sctp_timer *tmr;
1513	struct mbuf *op_err;
1514#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1515	struct socket *so;
1516#endif
1517	int did_output;
1518	int type;
1519
1520	tmr = (struct sctp_timer *)t;
1521	inp = (struct sctp_inpcb *)tmr->ep;
1522	stcb = (struct sctp_tcb *)tmr->tcb;
1523	net = (struct sctp_nets *)tmr->net;
1524	CURVNET_SET((struct vnet *)tmr->vnet);
1525	did_output = 1;
1526
1527#ifdef SCTP_AUDITING_ENABLED
1528	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1529	sctp_auditing(3, inp, stcb, net);
1530#endif
1531
1532	/* sanity checks... */
1533	if (tmr->self != (void *)tmr) {
1534		/*
1535		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1536		 * (void *)tmr);
1537		 */
1538		CURVNET_RESTORE();
1539		return;
1540	}
1541	tmr->stopped_from = 0xa001;
1542	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1543		/*
1544		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1545		 * tmr->type);
1546		 */
1547		CURVNET_RESTORE();
1548		return;
1549	}
1550	tmr->stopped_from = 0xa002;
1551	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1552		CURVNET_RESTORE();
1553		return;
1554	}
1555	/* if this is an iterator timeout, get the struct and clear inp */
1556	tmr->stopped_from = 0xa003;
1557	if (inp) {
1558		SCTP_INP_INCR_REF(inp);
1559		if ((inp->sctp_socket == NULL) &&
1560		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1561		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1562		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1563		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1564		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1565		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1566		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1567		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1568		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1569		    ) {
1570			SCTP_INP_DECR_REF(inp);
1571			CURVNET_RESTORE();
1572			return;
1573		}
1574	}
1575	tmr->stopped_from = 0xa004;
1576	if (stcb) {
1577		atomic_add_int(&stcb->asoc.refcnt, 1);
1578		if (stcb->asoc.state == 0) {
1579			atomic_add_int(&stcb->asoc.refcnt, -1);
1580			if (inp) {
1581				SCTP_INP_DECR_REF(inp);
1582			}
1583			CURVNET_RESTORE();
1584			return;
1585		}
1586	}
1587	type = tmr->type;
1588	tmr->stopped_from = 0xa005;
1589	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1590	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1591		if (inp) {
1592			SCTP_INP_DECR_REF(inp);
1593		}
1594		if (stcb) {
1595			atomic_add_int(&stcb->asoc.refcnt, -1);
1596		}
1597		CURVNET_RESTORE();
1598		return;
1599	}
1600	tmr->stopped_from = 0xa006;
1601
1602	if (stcb) {
1603		SCTP_TCB_LOCK(stcb);
1604		atomic_add_int(&stcb->asoc.refcnt, -1);
1605		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1606		    ((stcb->asoc.state == 0) ||
1607		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1608			SCTP_TCB_UNLOCK(stcb);
1609			if (inp) {
1610				SCTP_INP_DECR_REF(inp);
1611			}
1612			CURVNET_RESTORE();
1613			return;
1614		}
1615	}
1616	/* record in stopped_from which timeout type occurred */
1617	tmr->stopped_from = type;
1618
1619	/* mark as being serviced now */
1620	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1621		/*
1622		 * Callout has been rescheduled.
1623		 */
1624		goto get_out;
1625	}
1626	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1627		/*
1628		 * Not active, so no action.
1629		 */
1630		goto get_out;
1631	}
1632	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1633
1634	/* call the handler for the appropriate timer type */
1635	switch (type) {
1636	case SCTP_TIMER_TYPE_ZERO_COPY:
1637		if (inp == NULL) {
1638			break;
1639		}
1640		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1641			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1642		}
1643		break;
1644	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1645		if (inp == NULL) {
1646			break;
1647		}
1648		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1649			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1650		}
1651		break;
1652	case SCTP_TIMER_TYPE_ADDR_WQ:
1653		sctp_handle_addr_wq();
1654		break;
1655	case SCTP_TIMER_TYPE_SEND:
1656		if ((stcb == NULL) || (inp == NULL)) {
1657			break;
1658		}
1659		SCTP_STAT_INCR(sctps_timodata);
1660		stcb->asoc.timodata++;
1661		stcb->asoc.num_send_timers_up--;
1662		if (stcb->asoc.num_send_timers_up < 0) {
1663			stcb->asoc.num_send_timers_up = 0;
1664		}
1665		SCTP_TCB_LOCK_ASSERT(stcb);
1666		if (sctp_t3rxt_timer(inp, stcb, net)) {
1667			/* no need to unlock the tcb, it's gone */
1668
1669			goto out_decr;
1670		}
1671		SCTP_TCB_LOCK_ASSERT(stcb);
1672#ifdef SCTP_AUDITING_ENABLED
1673		sctp_auditing(4, inp, stcb, net);
1674#endif
1675		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1676		if ((stcb->asoc.num_send_timers_up == 0) &&
1677		    (stcb->asoc.sent_queue_cnt > 0)) {
1678			struct sctp_tmit_chunk *chk;
1679
1680			/*
1681			 * safeguard. If there are chunks on the sent queue
1682			 * but no timers running, something is wrong... so we
1683			 * start a timer on the first chunk of the sent queue,
1684			 * on whatever net it is sent to.
1685			 */
1686			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1687			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1688			    chk->whoTo);
1689		}
1690		break;
1691	case SCTP_TIMER_TYPE_INIT:
1692		if ((stcb == NULL) || (inp == NULL)) {
1693			break;
1694		}
1695		SCTP_STAT_INCR(sctps_timoinit);
1696		stcb->asoc.timoinit++;
1697		if (sctp_t1init_timer(inp, stcb, net)) {
1698			/* no need to unlock the tcb, it's gone */
1699			goto out_decr;
1700		}
1701		/* We do output but not here */
1702		did_output = 0;
1703		break;
1704	case SCTP_TIMER_TYPE_RECV:
1705		if ((stcb == NULL) || (inp == NULL)) {
1706			break;
1707		}
1708		SCTP_STAT_INCR(sctps_timosack);
1709		stcb->asoc.timosack++;
1710		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1711#ifdef SCTP_AUDITING_ENABLED
1712		sctp_auditing(4, inp, stcb, net);
1713#endif
1714		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1715		break;
1716	case SCTP_TIMER_TYPE_SHUTDOWN:
1717		if ((stcb == NULL) || (inp == NULL)) {
1718			break;
1719		}
1720		if (sctp_shutdown_timer(inp, stcb, net)) {
1721			/* no need to unlock the tcb, it's gone */
1722			goto out_decr;
1723		}
1724		SCTP_STAT_INCR(sctps_timoshutdown);
1725		stcb->asoc.timoshutdown++;
1726#ifdef SCTP_AUDITING_ENABLED
1727		sctp_auditing(4, inp, stcb, net);
1728#endif
1729		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1730		break;
1731	case SCTP_TIMER_TYPE_HEARTBEAT:
1732		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1733			break;
1734		}
1735		SCTP_STAT_INCR(sctps_timoheartbeat);
1736		stcb->asoc.timoheartbeat++;
1737		if (sctp_heartbeat_timer(inp, stcb, net)) {
1738			/* no need to unlock the tcb, it's gone */
1739			goto out_decr;
1740		}
1741#ifdef SCTP_AUDITING_ENABLED
1742		sctp_auditing(4, inp, stcb, net);
1743#endif
1744		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1745			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1746			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1747		}
1748		break;
1749	case SCTP_TIMER_TYPE_COOKIE:
1750		if ((stcb == NULL) || (inp == NULL)) {
1751			break;
1752		}
1753		if (sctp_cookie_timer(inp, stcb, net)) {
1754			/* no need to unlock the tcb, it's gone */
1755			goto out_decr;
1756		}
1757		SCTP_STAT_INCR(sctps_timocookie);
1758		stcb->asoc.timocookie++;
1759#ifdef SCTP_AUDITING_ENABLED
1760		sctp_auditing(4, inp, stcb, net);
1761#endif
1762		/*
1763		 * We consider the T3 and Cookie timers pretty much the same
1764		 * with respect to the "from" reason passed to chunk_output.
1765		 */
1766		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1767		break;
1768	case SCTP_TIMER_TYPE_NEWCOOKIE:
1769		{
1770			struct timeval tv;
1771			int i, secret;
1772
1773			if (inp == NULL) {
1774				break;
1775			}
1776			SCTP_STAT_INCR(sctps_timosecret);
1777			(void)SCTP_GETTIME_TIMEVAL(&tv);
1778			SCTP_INP_WLOCK(inp);
1779			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1780			inp->sctp_ep.last_secret_number =
1781			    inp->sctp_ep.current_secret_number;
1782			inp->sctp_ep.current_secret_number++;
1783			if (inp->sctp_ep.current_secret_number >=
1784			    SCTP_HOW_MANY_SECRETS) {
1785				inp->sctp_ep.current_secret_number = 0;
1786			}
1787			secret = (int)inp->sctp_ep.current_secret_number;
1788			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1789				inp->sctp_ep.secret_key[secret][i] =
1790				    sctp_select_initial_TSN(&inp->sctp_ep);
1791			}
1792			SCTP_INP_WUNLOCK(inp);
1793			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1794		}
1795		did_output = 0;
1796		break;
1797	case SCTP_TIMER_TYPE_PATHMTURAISE:
1798		if ((stcb == NULL) || (inp == NULL)) {
1799			break;
1800		}
1801		SCTP_STAT_INCR(sctps_timopathmtu);
1802		sctp_pathmtu_timer(inp, stcb, net);
1803		did_output = 0;
1804		break;
1805	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1806		if ((stcb == NULL) || (inp == NULL)) {
1807			break;
1808		}
1809		if (sctp_shutdownack_timer(inp, stcb, net)) {
1810			/* no need to unlock on tcb, it's gone */
1811			goto out_decr;
1812		}
1813		SCTP_STAT_INCR(sctps_timoshutdownack);
1814		stcb->asoc.timoshutdownack++;
1815#ifdef SCTP_AUDITING_ENABLED
1816		sctp_auditing(4, inp, stcb, net);
1817#endif
1818		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1819		break;
1820	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1821		if ((stcb == NULL) || (inp == NULL)) {
1822			break;
1823		}
1824		SCTP_STAT_INCR(sctps_timoshutdownguard);
1825		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1826		    "Shutdown guard timer expired");
1827		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1828		/* no need to unlock on tcb, it's gone */
1829		goto out_decr;
1830
1831	case SCTP_TIMER_TYPE_STRRESET:
1832		if ((stcb == NULL) || (inp == NULL)) {
1833			break;
1834		}
1835		if (sctp_strreset_timer(inp, stcb, net)) {
1836			/* no need to unlock on tcb, it's gone */
1837			goto out_decr;
1838		}
1839		SCTP_STAT_INCR(sctps_timostrmrst);
1840		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1841		break;
1842	case SCTP_TIMER_TYPE_ASCONF:
1843		if ((stcb == NULL) || (inp == NULL)) {
1844			break;
1845		}
1846		if (sctp_asconf_timer(inp, stcb, net)) {
1847			/* no need to unlock on tcb, it's gone */
1848			goto out_decr;
1849		}
1850		SCTP_STAT_INCR(sctps_timoasconf);
1851#ifdef SCTP_AUDITING_ENABLED
1852		sctp_auditing(4, inp, stcb, net);
1853#endif
1854		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1855		break;
1856	case SCTP_TIMER_TYPE_PRIM_DELETED:
1857		if ((stcb == NULL) || (inp == NULL)) {
1858			break;
1859		}
1860		sctp_delete_prim_timer(inp, stcb, net);
1861		SCTP_STAT_INCR(sctps_timodelprim);
1862		break;
1863
1864	case SCTP_TIMER_TYPE_AUTOCLOSE:
1865		if ((stcb == NULL) || (inp == NULL)) {
1866			break;
1867		}
1868		SCTP_STAT_INCR(sctps_timoautoclose);
1869		sctp_autoclose_timer(inp, stcb, net);
1870		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1871		did_output = 0;
1872		break;
1873	case SCTP_TIMER_TYPE_ASOCKILL:
1874		if ((stcb == NULL) || (inp == NULL)) {
1875			break;
1876		}
1877		SCTP_STAT_INCR(sctps_timoassockill);
1878		/* Can we free it yet? */
1879		SCTP_INP_DECR_REF(inp);
1880		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1881		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1882#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1883		so = SCTP_INP_SO(inp);
1884		atomic_add_int(&stcb->asoc.refcnt, 1);
1885		SCTP_TCB_UNLOCK(stcb);
1886		SCTP_SOCKET_LOCK(so, 1);
1887		SCTP_TCB_LOCK(stcb);
1888		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1889#endif
1890		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1891		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1892#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1893		SCTP_SOCKET_UNLOCK(so, 1);
1894#endif
1895		/*
1896		 * free asoc always unlocks (or destroys), so prevent a
1897		 * duplicate unlock or an unlock of a freed mtx :-0
1898		 */
1899		stcb = NULL;
1900		goto out_no_decr;
1901	case SCTP_TIMER_TYPE_INPKILL:
1902		SCTP_STAT_INCR(sctps_timoinpkill);
1903		if (inp == NULL) {
1904			break;
1905		}
1906		/*
1907		 * special case, take away our increment since WE are the
1908		 * killer
1909		 */
1910		SCTP_INP_DECR_REF(inp);
1911		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1912		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1913		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1914		    SCTP_CALLED_FROM_INPKILL_TIMER);
1915		inp = NULL;
1916		goto out_no_decr;
1917	default:
1918		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1919		    type);
1920		break;
1921	}
1922#ifdef SCTP_AUDITING_ENABLED
1923	sctp_audit_log(0xF1, (uint8_t)type);
1924	if (inp)
1925		sctp_auditing(5, inp, stcb, net);
1926#endif
1927	if ((did_output) && stcb) {
1928		/*
1929		 * Now we need to clean up the control chunk chain if an
1930		 * ECNE is on it. It must be marked as UNSENT again so the
1931		 * next call will continue to send it until we get a CWR to
1932		 * remove it. It is, however, unlikely that we will find an
1933		 * ECN echo on the chain.
1934		 */
1935		sctp_fix_ecn_echo(&stcb->asoc);
1936	}
1937get_out:
1938	if (stcb) {
1939		SCTP_TCB_UNLOCK(stcb);
1940	}
1941out_decr:
1942	if (inp) {
1943		SCTP_INP_DECR_REF(inp);
1944	}
1945out_no_decr:
1946	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1947	CURVNET_RESTORE();
1948}
1949
1950void
1951sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1952    struct sctp_nets *net)
1953{
1954	uint32_t to_ticks;
1955	struct sctp_timer *tmr;
1956
1957	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1958		return;
1959
1960	tmr = NULL;
1961	if (stcb) {
1962		SCTP_TCB_LOCK_ASSERT(stcb);
1963	}
1964	switch (t_type) {
1965	case SCTP_TIMER_TYPE_ZERO_COPY:
1966		tmr = &inp->sctp_ep.zero_copy_timer;
1967		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968		break;
1969	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972		break;
1973	case SCTP_TIMER_TYPE_ADDR_WQ:
1974		/* Only 1 tick away :-) */
1975		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977		break;
1978	case SCTP_TIMER_TYPE_SEND:
1979		/* Here we use the RTO timer */
1980		{
1981			int rto_val;
1982
1983			if ((stcb == NULL) || (net == NULL)) {
1984				return;
1985			}
1986			tmr = &net->rxt_timer;
1987			if (net->RTO == 0) {
1988				rto_val = stcb->asoc.initial_rto;
1989			} else {
1990				rto_val = net->RTO;
1991			}
1992			to_ticks = MSEC_TO_TICKS(rto_val);
1993		}
1994		break;
1995	case SCTP_TIMER_TYPE_INIT:
1996		/*
1997		 * Here we use the INIT timer default; usually about 1
1998		 * minute.
1999		 */
2000		if ((stcb == NULL) || (net == NULL)) {
2001			return;
2002		}
2003		tmr = &net->rxt_timer;
2004		if (net->RTO == 0) {
2005			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006		} else {
2007			to_ticks = MSEC_TO_TICKS(net->RTO);
2008		}
2009		break;
2010	case SCTP_TIMER_TYPE_RECV:
2011		/*
2012		 * Here we use the Delayed-Ack timer value from the inp;
2013		 * usually about 200ms.
2014		 */
2015		if (stcb == NULL) {
2016			return;
2017		}
2018		tmr = &stcb->asoc.dack_timer;
2019		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020		break;
2021	case SCTP_TIMER_TYPE_SHUTDOWN:
2022		/* Here we use the RTO of the destination. */
2023		if ((stcb == NULL) || (net == NULL)) {
2024			return;
2025		}
2026		if (net->RTO == 0) {
2027			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028		} else {
2029			to_ticks = MSEC_TO_TICKS(net->RTO);
2030		}
2031		tmr = &net->rxt_timer;
2032		break;
2033	case SCTP_TIMER_TYPE_HEARTBEAT:
2034		/*
2035		 * The net is used here so that we can add in the RTO, even
2036		 * though we use a different timer. We also add the HB delay
2037		 * PLUS a random jitter.
2038		 */
2039		if ((stcb == NULL) || (net == NULL)) {
2040			return;
2041		} else {
2042			uint32_t rndval;
2043			uint32_t jitter;
2044
2045			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2046			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2047				return;
2048			}
2049			if (net->RTO == 0) {
2050				to_ticks = stcb->asoc.initial_rto;
2051			} else {
2052				to_ticks = net->RTO;
2053			}
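			/*
			 * The jitter below randomizes to_ticks (still in
			 * milliseconds at this point) by up to +/- 50% of
			 * the RTO.
			 */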
2054			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2055			jitter = rndval % to_ticks;
2056			if (jitter >= (to_ticks >> 1)) {
2057				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2058			} else {
2059				to_ticks = to_ticks - jitter;
2060			}
2061			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2062			    !(net->dest_state & SCTP_ADDR_PF)) {
2063				to_ticks += net->heart_beat_delay;
2064			}
2065			/*
2066			 * Now we must convert to_ticks, which currently
2067			 * holds milliseconds, into ticks.
2068			 */
2069			to_ticks = MSEC_TO_TICKS(to_ticks);
2070			tmr = &net->hb_timer;
2071		}
2072		break;
2073	case SCTP_TIMER_TYPE_COOKIE:
2074		/*
2075		 * Here we can use the RTO of the network since one RTT
2076		 * was complete. If a retransmission happened, then we will
2077		 * be using the initial RTO value.
2078		 */
2079		if ((stcb == NULL) || (net == NULL)) {
2080			return;
2081		}
2082		if (net->RTO == 0) {
2083			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084		} else {
2085			to_ticks = MSEC_TO_TICKS(net->RTO);
2086		}
2087		tmr = &net->rxt_timer;
2088		break;
2089	case SCTP_TIMER_TYPE_NEWCOOKIE:
2090		/*
2091		 * Nothing needed but the endpoint here; usually about 60
2092		 * minutes.
2093		 */
2094		tmr = &inp->sctp_ep.signature_change;
2095		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2096		break;
2097	case SCTP_TIMER_TYPE_ASOCKILL:
2098		if (stcb == NULL) {
2099			return;
2100		}
2101		tmr = &stcb->asoc.strreset_timer;
2102		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2103		break;
2104	case SCTP_TIMER_TYPE_INPKILL:
2105		/*
2106		 * The inp is set up to die. We re-use the signature_change
2107		 * timer since that has stopped and we are in the GONE
2108		 * state.
2109		 */
2110		tmr = &inp->sctp_ep.signature_change;
2111		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2112		break;
2113	case SCTP_TIMER_TYPE_PATHMTURAISE:
2114		/*
2115		 * Here we use the value found in the EP for PMTU; usually
2116		 * about 10 minutes.
2117		 */
2118		if ((stcb == NULL) || (net == NULL)) {
2119			return;
2120		}
2121		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2122			return;
2123		}
2124		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2125		tmr = &net->pmtu_timer;
2126		break;
2127	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2128		/* Here we use the RTO of the destination */
2129		if ((stcb == NULL) || (net == NULL)) {
2130			return;
2131		}
2132		if (net->RTO == 0) {
2133			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2134		} else {
2135			to_ticks = MSEC_TO_TICKS(net->RTO);
2136		}
2137		tmr = &net->rxt_timer;
2138		break;
2139	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2140		/*
2141		 * Here we use the endpoint's shutdown guard timer; usually
2142		 * about 3 minutes.
2143		 */
2144		if (stcb == NULL) {
2145			return;
2146		}
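		/*
		 * With no explicit shutdown guard time configured, fall
		 * back to 5 times the association's maximum RTO.
		 */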
2147		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2148			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2149		} else {
2150			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2151		}
2152		tmr = &stcb->asoc.shut_guard_timer;
2153		break;
2154	case SCTP_TIMER_TYPE_STRRESET:
2155		/*
2156		 * Here the timer comes from the stcb but its value is from
2157		 * the net's RTO.
2158		 */
2159		if ((stcb == NULL) || (net == NULL)) {
2160			return;
2161		}
2162		if (net->RTO == 0) {
2163			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2164		} else {
2165			to_ticks = MSEC_TO_TICKS(net->RTO);
2166		}
2167		tmr = &stcb->asoc.strreset_timer;
2168		break;
2169	case SCTP_TIMER_TYPE_ASCONF:
2170		/*
2171		 * Here the timer comes from the stcb but its value is from
2172		 * the net's RTO.
2173		 */
2174		if ((stcb == NULL) || (net == NULL)) {
2175			return;
2176		}
2177		if (net->RTO == 0) {
2178			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2179		} else {
2180			to_ticks = MSEC_TO_TICKS(net->RTO);
2181		}
2182		tmr = &stcb->asoc.asconf_timer;
2183		break;
2184	case SCTP_TIMER_TYPE_PRIM_DELETED:
2185		if ((stcb == NULL) || (net != NULL)) {
2186			return;
2187		}
2188		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2189		tmr = &stcb->asoc.delete_prim_timer;
2190		break;
2191	case SCTP_TIMER_TYPE_AUTOCLOSE:
2192		if (stcb == NULL) {
2193			return;
2194		}
2195		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2196			/*
2197			 * Really an error since stcb is NOT set to
2198			 * autoclose
2199			 */
2200			return;
2201		}
2202		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2203		tmr = &stcb->asoc.autoclose_timer;
2204		break;
2205	default:
2206		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2207		    __func__, t_type);
2208		return;
2209		break;
2210	}
2211	if ((to_ticks <= 0) || (tmr == NULL)) {
2212		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2213		    __func__, t_type, to_ticks, (void *)tmr);
2214		return;
2215	}
2216	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2217		/*
2218		 * We do NOT allow the timer to already be running; if it
2219		 * is, we leave the current one up unchanged.
2220		 */
2221		return;
2222	}
2223	/* At this point we can proceed */
2224	if (t_type == SCTP_TIMER_TYPE_SEND) {
2225		stcb->asoc.num_send_timers_up++;
2226	}
2227	tmr->stopped_from = 0;
2228	tmr->type = t_type;
2229	tmr->ep = (void *)inp;
2230	tmr->tcb = (void *)stcb;
2231	tmr->net = (void *)net;
2232	tmr->self = (void *)tmr;
2233	tmr->vnet = (void *)curvnet;
2234	tmr->ticks = sctp_get_tick_count();
2235	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2236	return;
2237}
2238
2239void
2240sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2241    struct sctp_nets *net, uint32_t from)
2242{
2243	struct sctp_timer *tmr;
2244
2245	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2246	    (inp == NULL))
2247		return;
2248
2249	tmr = NULL;
2250	if (stcb) {
2251		SCTP_TCB_LOCK_ASSERT(stcb);
2252	}
2253	switch (t_type) {
2254	case SCTP_TIMER_TYPE_ZERO_COPY:
2255		tmr = &inp->sctp_ep.zero_copy_timer;
2256		break;
2257	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2258		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2259		break;
2260	case SCTP_TIMER_TYPE_ADDR_WQ:
2261		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2262		break;
2263	case SCTP_TIMER_TYPE_SEND:
2264		if ((stcb == NULL) || (net == NULL)) {
2265			return;
2266		}
2267		tmr = &net->rxt_timer;
2268		break;
2269	case SCTP_TIMER_TYPE_INIT:
2270		if ((stcb == NULL) || (net == NULL)) {
2271			return;
2272		}
2273		tmr = &net->rxt_timer;
2274		break;
2275	case SCTP_TIMER_TYPE_RECV:
2276		if (stcb == NULL) {
2277			return;
2278		}
2279		tmr = &stcb->asoc.dack_timer;
2280		break;
2281	case SCTP_TIMER_TYPE_SHUTDOWN:
2282		if ((stcb == NULL) || (net == NULL)) {
2283			return;
2284		}
2285		tmr = &net->rxt_timer;
2286		break;
2287	case SCTP_TIMER_TYPE_HEARTBEAT:
2288		if ((stcb == NULL) || (net == NULL)) {
2289			return;
2290		}
2291		tmr = &net->hb_timer;
2292		break;
2293	case SCTP_TIMER_TYPE_COOKIE:
2294		if ((stcb == NULL) || (net == NULL)) {
2295			return;
2296		}
2297		tmr = &net->rxt_timer;
2298		break;
2299	case SCTP_TIMER_TYPE_NEWCOOKIE:
2300		/* nothing needed but the endpoint here */
2301		tmr = &inp->sctp_ep.signature_change;
2302		/*
2303		 * We re-use the newcookie timer for the INP kill timer. We
2304		 * must ensure that we do not kill it by accident.
2305		 */
2306		break;
2307	case SCTP_TIMER_TYPE_ASOCKILL:
2308		/*
2309		 * Stop the asoc kill timer.
2310		 */
2311		if (stcb == NULL) {
2312			return;
2313		}
2314		tmr = &stcb->asoc.strreset_timer;
2315		break;
2316
2317	case SCTP_TIMER_TYPE_INPKILL:
2318		/*
2319		 * The inp is set up to die. We re-use the signature_change
2320		 * timer since that has stopped and we are in the GONE
2321		 * state.
2322		 */
2323		tmr = &inp->sctp_ep.signature_change;
2324		break;
2325	case SCTP_TIMER_TYPE_PATHMTURAISE:
2326		if ((stcb == NULL) || (net == NULL)) {
2327			return;
2328		}
2329		tmr = &net->pmtu_timer;
2330		break;
2331	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2332		if ((stcb == NULL) || (net == NULL)) {
2333			return;
2334		}
2335		tmr = &net->rxt_timer;
2336		break;
2337	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2338		if (stcb == NULL) {
2339			return;
2340		}
2341		tmr = &stcb->asoc.shut_guard_timer;
2342		break;
2343	case SCTP_TIMER_TYPE_STRRESET:
2344		if (stcb == NULL) {
2345			return;
2346		}
2347		tmr = &stcb->asoc.strreset_timer;
2348		break;
2349	case SCTP_TIMER_TYPE_ASCONF:
2350		if (stcb == NULL) {
2351			return;
2352		}
2353		tmr = &stcb->asoc.asconf_timer;
2354		break;
2355	case SCTP_TIMER_TYPE_PRIM_DELETED:
2356		if (stcb == NULL) {
2357			return;
2358		}
2359		tmr = &stcb->asoc.delete_prim_timer;
2360		break;
2361	case SCTP_TIMER_TYPE_AUTOCLOSE:
2362		if (stcb == NULL) {
2363			return;
2364		}
2365		tmr = &stcb->asoc.autoclose_timer;
2366		break;
2367	default:
2368		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2369		    __func__, t_type);
2370		break;
2371	}
2372	if (tmr == NULL) {
2373		return;
2374	}
2375	if ((tmr->type != t_type) && tmr->type) {
2376		/*
2377		 * OK, we have a timer that is under joint use, perhaps the
2378		 * cookie timer sharing with the SEND timer. We are therefore
2379		 * NOT running the timer that the caller wants stopped, so
2380		 * just return.
2381		 */
2382		return;
2383	}
2384	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2385		stcb->asoc.num_send_timers_up--;
2386		if (stcb->asoc.num_send_timers_up < 0) {
2387			stcb->asoc.num_send_timers_up = 0;
2388		}
2389	}
2390	tmr->self = NULL;
2391	tmr->stopped_from = from;
2392	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2393	return;
2394}
2395
2396uint32_t
2397sctp_calculate_len(struct mbuf *m)
2398{
2399	uint32_t tlen = 0;
2400	struct mbuf *at;
2401
2402	at = m;
2403	while (at) {
2404		tlen += SCTP_BUF_LEN(at);
2405		at = SCTP_BUF_NEXT(at);
2406	}
2407	return (tlen);
2408}
2409
2410void
2411sctp_mtu_size_reset(struct sctp_inpcb *inp,
2412    struct sctp_association *asoc, uint32_t mtu)
2413{
2414	/*
2415	 * Reset the P-MTU size on this association. This involves changing
2416	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2417	 * to allow the DF flag to be cleared.
2418	 */
2419	struct sctp_tmit_chunk *chk;
2420	unsigned int eff_mtu, ovh;
2421
2422	asoc->smallest_mtu = mtu;
2423	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2424		ovh = SCTP_MIN_OVERHEAD;
2425	} else {
2426		ovh = SCTP_MIN_V4_OVERHEAD;
2427	}
2428	eff_mtu = mtu - ovh;
2429	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2430		if (chk->send_size > eff_mtu) {
2431			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432		}
2433	}
2434	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2435		if (chk->send_size > eff_mtu) {
2436			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2437		}
2438	}
2439}
2440
2441
2442/*
2443 * Given an association and the starting time of the current RTT period,
2444 * return the RTO in number of msecs. net should point to the current network.
2445 */
2446
2447uint32_t
2448sctp_calculate_rto(struct sctp_tcb *stcb,
2449    struct sctp_association *asoc,
2450    struct sctp_nets *net,
2451    struct timeval *told,
2452    int safe, int rtt_from_sack)
2453{
2454	/*-
2455	 * given an association and the starting time of the current RTT
2456	 * period (in value1/value2) return RTO in number of msecs.
2457	 */
2458	int32_t rtt;		/* RTT in ms */
2459	uint32_t new_rto;
2460	int first_measure = 0;
2461	struct timeval now, then, *old;
2462
2463	/* Copy it out for sparc64 */
2464	if (safe == sctp_align_unsafe_makecopy) {
2465		old = &then;
2466		memcpy(&then, told, sizeof(struct timeval));
2467	} else if (safe == sctp_align_safe_nocopy) {
2468		old = told;
2469	} else {
2470		/* error */
2471		SCTP_PRINTF("Huh, bad rto calc call\n");
2472		return (0);
2473	}
2474	/************************/
2475	/* 1. calculate new RTT */
2476	/************************/
2477	/* get the current time */
2478	if (stcb->asoc.use_precise_time) {
2479		(void)SCTP_GETPTIME_TIMEVAL(&now);
2480	} else {
2481		(void)SCTP_GETTIME_TIMEVAL(&now);
2482	}
2483	timevalsub(&now, old);
2484	/* store the current RTT in microseconds */
2485	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
2486	        (uint64_t)now.tv_usec;
2487
2488	/* compute rtt in ms */
2489	rtt = (int32_t)(net->rtt / 1000);
2490	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2491		/*
2492		 * Tell the CC module that a new update has just occurred
2493		 * from a sack
2494		 */
2495		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2496	}
2497	/*
2498	 * Do we need to determine the LAN type? We do this only on SACKs,
2499	 * i.e. when the RTT is determined from data, not non-data (HB/INIT->INITACK).
2500	 */
2501	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2502	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2503		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2504			net->lan_type = SCTP_LAN_INTERNET;
2505		} else {
2506			net->lan_type = SCTP_LAN_LOCAL;
2507		}
2508	}
2509	/***************************/
2510	/* 2. update RTTVAR & SRTT */
2511	/***************************/
2512	/*-
2513	 * Compute the scaled average lastsa and the
2514	 * scaled variance lastsv as described in Van Jacobson's
2515	 * paper "Congestion Avoidance and Control", Annex A.
2516	 *
2517	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2518	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2519	 */
2520	if (net->RTO_measured) {
2521		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2522		net->lastsa += rtt;
2523		if (rtt < 0) {
2524			rtt = -rtt;
2525		}
2526		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2527		net->lastsv += rtt;
2528		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2529			rto_logging(net, SCTP_LOG_RTTVAR);
2530		}
2531	} else {
2532		/* First RTO measurement */
2533		net->RTO_measured = 1;
2534		first_measure = 1;
2535		net->lastsa = rtt << SCTP_RTT_SHIFT;
2536		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2537		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2538			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2539		}
2540	}
2541	if (net->lastsv == 0) {
2542		net->lastsv = SCTP_CLOCK_GRANULARITY;
2543	}
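	/*
	 * RTO = SRTT + 4 * RTTVAR (RFC 4960, Section 6.3.1): lastsa carries
	 * the SRTT scaled by 2^SCTP_RTT_SHIFT and lastsv carries the RTTVAR
	 * scaled by 2^SCTP_RTT_VAR_SHIFT, so with a variance shift of 2 the
	 * sum below is exactly SRTT + 4 * RTTVAR.
	 */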
2544	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
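	/*
	 * Heuristic: an RTO above SCTP_SAT_NETWORK_MIN suggests a
	 * satellite-like (long-delay) path. Once the flag is cleared again,
	 * further changes are locked out to avoid flapping.
	 */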
2545	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2546	    (stcb->asoc.sat_network_lockout == 0)) {
2547		stcb->asoc.sat_network = 1;
2548	} else if ((!first_measure) && stcb->asoc.sat_network) {
2549		stcb->asoc.sat_network = 0;
2550		stcb->asoc.sat_network_lockout = 1;
2551	}
2552	/* bound it, per C6/C7 in Section 5.3.1 */
2553	if (new_rto < stcb->asoc.minrto) {
2554		new_rto = stcb->asoc.minrto;
2555	}
2556	if (new_rto > stcb->asoc.maxrto) {
2557		new_rto = stcb->asoc.maxrto;
2558	}
2559	/* we are now returning the RTO */
2560	return (new_rto);
2561}
2562
2563/*
2564 * return a pointer to a contiguous piece of data from the given mbuf chain
2565 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2566 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
2567 * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2568 */
2569caddr_t
2570sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2571{
2572	uint32_t count;
2573	uint8_t *ptr;
2574
2575	ptr = in_ptr;
2576	if ((off < 0) || (len <= 0))
2577		return (NULL);
2578
2579	/* find the desired start location */
2580	while ((m != NULL) && (off > 0)) {
2581		if (off < SCTP_BUF_LEN(m))
2582			break;
2583		off -= SCTP_BUF_LEN(m);
2584		m = SCTP_BUF_NEXT(m);
2585	}
2586	if (m == NULL)
2587		return (NULL);
2588
2589	/* is the current mbuf large enough (e.g., contiguous)? */
2590	if ((SCTP_BUF_LEN(m) - off) >= len) {
2591		return (mtod(m, caddr_t)+off);
2592	} else {
2593		/* else, it spans more than one mbuf, so save a temp copy... */
2594		while ((m != NULL) && (len > 0)) {
2595			count = min(SCTP_BUF_LEN(m) - off, len);
2596			memcpy(ptr, mtod(m, caddr_t)+off, count);
2597			len -= count;
2598			ptr += count;
2599			off = 0;
2600			m = SCTP_BUF_NEXT(m);
2601		}
2602		if ((m == NULL) && (len > 0))
2603			return (NULL);
2604		else
2605			return ((caddr_t)in_ptr);
2606	}
2607}
2608
2609
2610
2611struct sctp_paramhdr *
2612sctp_get_next_param(struct mbuf *m,
2613    int offset,
2614    struct sctp_paramhdr *pull,
2615    int pull_limit)
2616{
2617	/* This just provides a typed signature to Peter's Pull routine */
2618	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2619	    (uint8_t *)pull));
2620}
2621
2622
2623struct mbuf *
2624sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2625{
2626	struct mbuf *m_last;
2627	caddr_t dp;
2628
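	/*
	 * SCTP chunks are padded to a 4-byte boundary, so at most 3 pad
	 * bytes are ever needed.
	 */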
2629	if (padlen > 3) {
2630		return (NULL);
2631	}
2632	if (padlen <= M_TRAILINGSPACE(m)) {
2633		/*
2634		 * The easy way. We hope the majority of the time we hit
2635		 * here :)
2636		 */
2637		m_last = m;
2638	} else {
2639		/* Hard way: we must grow the mbuf chain */
2640		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2641		if (m_last == NULL) {
2642			return (NULL);
2643		}
2644		SCTP_BUF_LEN(m_last) = 0;
2645		SCTP_BUF_NEXT(m_last) = NULL;
2646		SCTP_BUF_NEXT(m) = m_last;
2647	}
2648	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2649	SCTP_BUF_LEN(m_last) += padlen;
2650	memset(dp, 0, padlen);
2651	return (m_last);
2652}
2653
2654struct mbuf *
2655sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2656{
2657	/* find the last mbuf in chain and pad it */
2658	struct mbuf *m_at;
2659
2660	if (last_mbuf != NULL) {
2661		return (sctp_add_pad_tombuf(last_mbuf, padval));
2662	} else {
2663		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2664			if (SCTP_BUF_NEXT(m_at) == NULL) {
2665				return (sctp_add_pad_tombuf(m_at, padval));
2666			}
2667		}
2668	}
2669	return (NULL);
2670}
2671
2672static void
2673sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2674    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2675#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2676    SCTP_UNUSED
2677#endif
2678)
2679{
2680	struct mbuf *m_notify;
2681	struct sctp_assoc_change *sac;
2682	struct sctp_queued_to_read *control;
2683	unsigned int notif_len;
2684	uint16_t abort_len;
2685	unsigned int i;
2686#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2687	struct socket *so;
2688#endif
2689
2690	if (stcb == NULL) {
2691		return;
2692	}
2693	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2694		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2695		if (abort != NULL) {
2696			abort_len = ntohs(abort->ch.chunk_length);
2697		} else {
2698			abort_len = 0;
2699		}
2700		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2701			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2702		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2703			notif_len += abort_len;
2704		}
2705		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2706		if (m_notify == NULL) {
2707			/* Retry with smaller value. */
2708			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2709			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2710			if (m_notify == NULL) {
2711				goto set_error;
2712			}
2713		}
2714		SCTP_BUF_NEXT(m_notify) = NULL;
2715		sac = mtod(m_notify, struct sctp_assoc_change *);
2716		memset(sac, 0, notif_len);
2717		sac->sac_type = SCTP_ASSOC_CHANGE;
2718		sac->sac_flags = 0;
2719		sac->sac_length = sizeof(struct sctp_assoc_change);
2720		sac->sac_state = state;
2721		sac->sac_error = error;
2722		/* XXX verify these stream counts */
2723		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2724		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2725		sac->sac_assoc_id = sctp_get_associd(stcb);
2726		if (notif_len > sizeof(struct sctp_assoc_change)) {
2727			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2728				i = 0;
2729				if (stcb->asoc.prsctp_supported == 1) {
2730					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2731				}
2732				if (stcb->asoc.auth_supported == 1) {
2733					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2734				}
2735				if (stcb->asoc.asconf_supported == 1) {
2736					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2737				}
2738				if (stcb->asoc.idata_supported == 1) {
2739					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2740				}
2741				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2742				if (stcb->asoc.reconfig_supported == 1) {
2743					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2744				}
2745				sac->sac_length += i;
2746			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2747				memcpy(sac->sac_info, abort, abort_len);
2748				sac->sac_length += abort_len;
2749			}
2750		}
2751		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2752		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2753		    0, 0, stcb->asoc.context, 0, 0, 0,
2754		    m_notify);
2755		if (control != NULL) {
2756			control->length = SCTP_BUF_LEN(m_notify);
2757			control->spec_flags = M_NOTIFICATION;
2758			/* not that we need this */
2759			control->tail_mbuf = m_notify;
2760			sctp_add_to_readq(stcb->sctp_ep, stcb,
2761			    control,
2762			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2763			    so_locked);
2764		} else {
2765			sctp_m_freem(m_notify);
2766		}
2767	}
2768	/*
2769	 * For 1-to-1 style sockets, we send up an error when an ABORT
2770	 * comes in.
2771	 */
2772set_error:
2773	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2774	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2775	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2776		SOCK_LOCK(stcb->sctp_socket);
2777		if (from_peer) {
2778			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2779				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2780				stcb->sctp_socket->so_error = ECONNREFUSED;
2781			} else {
2782				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2783				stcb->sctp_socket->so_error = ECONNRESET;
2784			}
2785		} else {
2786			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2787			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2788				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2789				stcb->sctp_socket->so_error = ETIMEDOUT;
2790			} else {
2791				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2792				stcb->sctp_socket->so_error = ECONNABORTED;
2793			}
2794		}
2795	}
2796	/* Wake ANY sleepers */
2797#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2798	so = SCTP_INP_SO(stcb->sctp_ep);
2799	if (!so_locked) {
2800		atomic_add_int(&stcb->asoc.refcnt, 1);
2801		SCTP_TCB_UNLOCK(stcb);
2802		SCTP_SOCKET_LOCK(so, 1);
2803		SCTP_TCB_LOCK(stcb);
2804		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2805		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2806			SCTP_SOCKET_UNLOCK(so, 1);
2807			return;
2808		}
2809	}
2810#endif
2811	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2812	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2813	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2814		socantrcvmore_locked(stcb->sctp_socket);
2815	}
2816	sorwakeup(stcb->sctp_socket);
2817	sowwakeup(stcb->sctp_socket);
2818#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2819	if (!so_locked) {
2820		SCTP_SOCKET_UNLOCK(so, 1);
2821	}
2822#endif
2823}
2824
2825static void
2826sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2827    struct sockaddr *sa, uint32_t error, int so_locked
2828#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2829    SCTP_UNUSED
2830#endif
2831)
2832{
2833	struct mbuf *m_notify;
2834	struct sctp_paddr_change *spc;
2835	struct sctp_queued_to_read *control;
2836
2837	if ((stcb == NULL) ||
2838	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2839		/* event not enabled */
2840		return;
2841	}
2842	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2843	if (m_notify == NULL)
2844		return;
2845	SCTP_BUF_LEN(m_notify) = 0;
2846	spc = mtod(m_notify, struct sctp_paddr_change *);
2847	memset(spc, 0, sizeof(struct sctp_paddr_change));
2848	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2849	spc->spc_flags = 0;
2850	spc->spc_length = sizeof(struct sctp_paddr_change);
2851	switch (sa->sa_family) {
2852#ifdef INET
2853	case AF_INET:
2854#ifdef INET6
2855		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2856			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2857			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2858		} else {
2859			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2860		}
2861#else
2862		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2863#endif
2864		break;
2865#endif
2866#ifdef INET6
2867	case AF_INET6:
2868		{
2869			struct sockaddr_in6 *sin6;
2870
2871			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2872
2873			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2874			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2875				if (sin6->sin6_scope_id == 0) {
2876					/* recover scope_id for user */
2877					(void)sa6_recoverscope(sin6);
2878				} else {
2879					/* clear embedded scope_id for user */
2880					in6_clearscope(&sin6->sin6_addr);
2881				}
2882			}
2883			break;
2884		}
2885#endif
2886	default:
2887		/* TSNH */
2888		break;
2889	}
2890	spc->spc_state = state;
2891	spc->spc_error = error;
2892	spc->spc_assoc_id = sctp_get_associd(stcb);
2893
2894	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2895	SCTP_BUF_NEXT(m_notify) = NULL;
2896
2897	/* append to socket */
2898	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2899	    0, 0, stcb->asoc.context, 0, 0, 0,
2900	    m_notify);
2901	if (control == NULL) {
2902		/* no memory */
2903		sctp_m_freem(m_notify);
2904		return;
2905	}
2906	control->length = SCTP_BUF_LEN(m_notify);
2907	control->spec_flags = M_NOTIFICATION;
2908	/* not that we need this */
2909	control->tail_mbuf = m_notify;
2910	sctp_add_to_readq(stcb->sctp_ep, stcb,
2911	    control,
2912	    &stcb->sctp_socket->so_rcv, 1,
2913	    SCTP_READ_LOCK_NOT_HELD,
2914	    so_locked);
2915}
2916
2917
2918static void
2919sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2920    struct sctp_tmit_chunk *chk, int so_locked
2921#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2922    SCTP_UNUSED
2923#endif
2924)
2925{
2926	struct mbuf *m_notify;
2927	struct sctp_send_failed *ssf;
2928	struct sctp_send_failed_event *ssfe;
2929	struct sctp_queued_to_read *control;
2930	struct sctp_chunkhdr *chkhdr;
2931	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2932
2933	if ((stcb == NULL) ||
2934	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2935	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2936		/* event not enabled */
2937		return;
2938	}
2939	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2940		notifhdr_len = sizeof(struct sctp_send_failed_event);
2941	} else {
2942		notifhdr_len = sizeof(struct sctp_send_failed);
2943	}
2944	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2945	if (m_notify == NULL)
2946		/* no space left */
2947		return;
2948	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2949	if (stcb->asoc.idata_supported) {
2950		chkhdr_len = sizeof(struct sctp_idata_chunk);
2951	} else {
2952		chkhdr_len = sizeof(struct sctp_data_chunk);
2953	}
2954	/* Use some defaults in case we can't access the chunk header */
2955	if (chk->send_size >= chkhdr_len) {
2956		payload_len = chk->send_size - chkhdr_len;
2957	} else {
2958		payload_len = 0;
2959	}
2960	padding_len = 0;
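	/*
	 * If the chunk header is readable, recover the exact chunk length
	 * so the (at most 3 byte) padding after the user payload can be
	 * accounted for separately.
	 */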
2961	if (chk->data != NULL) {
2962		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2963		if (chkhdr != NULL) {
2964			chk_len = ntohs(chkhdr->chunk_length);
2965			if ((chk_len >= chkhdr_len) &&
2966			    (chk->send_size >= chk_len) &&
2967			    (chk->send_size - chk_len < 4)) {
2968				padding_len = chk->send_size - chk_len;
2969				payload_len = chk->send_size - chkhdr_len - padding_len;
2970			}
2971		}
2972	}
2973	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2974		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2975		memset(ssfe, 0, notifhdr_len);
2976		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2977		if (sent) {
2978			ssfe->ssfe_flags = SCTP_DATA_SENT;
2979		} else {
2980			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2981		}
2982		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2983		ssfe->ssfe_error = error;
2984		/* not exactly what the user sent in, but should be close :) */
2985		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2986		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2987		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2988		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2989		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2990		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2991	} else {
2992		ssf = mtod(m_notify, struct sctp_send_failed *);
2993		memset(ssf, 0, notifhdr_len);
2994		ssf->ssf_type = SCTP_SEND_FAILED;
2995		if (sent) {
2996			ssf->ssf_flags = SCTP_DATA_SENT;
2997		} else {
2998			ssf->ssf_flags = SCTP_DATA_UNSENT;
2999		}
3000		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3001		ssf->ssf_error = error;
3002		/* not exactly what the user sent in, but should be close :) */
3003		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3004		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3005		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3006		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3007		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3008		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3009		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3010	}
3011	if (chk->data != NULL) {
3012		/* Trim off the sctp chunk header (it should be there) */
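		/*
		 * Below, m_adj() with a negative length strips the trailing
		 * padding from the end of the chain.
		 */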
3013		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3014			m_adj(chk->data, chkhdr_len);
3015			m_adj(chk->data, -padding_len);
3016			sctp_mbuf_crush(chk->data);
3017			chk->send_size -= (chkhdr_len + padding_len);
3018		}
3019	}
3020	SCTP_BUF_NEXT(m_notify) = chk->data;
3021	/* Steal off the mbuf */
3022	chk->data = NULL;
3023	/*
3024	 * For this case, we check the actual socket buffer, since the assoc
3025	 * is going away: we don't want to overfill the socket buffer for a
3026	 * non-reader.
3027	 */
3028	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3029		sctp_m_freem(m_notify);
3030		return;
3031	}
3032	/* append to socket */
3033	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3034	    0, 0, stcb->asoc.context, 0, 0, 0,
3035	    m_notify);
3036	if (control == NULL) {
3037		/* no memory */
3038		sctp_m_freem(m_notify);
3039		return;
3040	}
3041	control->length = SCTP_BUF_LEN(m_notify);
3042	control->spec_flags = M_NOTIFICATION;
3043	/* not that we need this */
3044	control->tail_mbuf = m_notify;
3045	sctp_add_to_readq(stcb->sctp_ep, stcb,
3046	    control,
3047	    &stcb->sctp_socket->so_rcv, 1,
3048	    SCTP_READ_LOCK_NOT_HELD,
3049	    so_locked);
3050}
3051
3052
3053static void
3054sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3055    struct sctp_stream_queue_pending *sp, int so_locked
3056#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3057    SCTP_UNUSED
3058#endif
3059)
3060{
3061	struct mbuf *m_notify;
3062	struct sctp_send_failed *ssf;
3063	struct sctp_send_failed_event *ssfe;
3064	struct sctp_queued_to_read *control;
3065	int notifhdr_len;
3066
3067	if ((stcb == NULL) ||
3068	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3069	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3070		/* event not enabled */
3071		return;
3072	}
3073	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3074		notifhdr_len = sizeof(struct sctp_send_failed_event);
3075	} else {
3076		notifhdr_len = sizeof(struct sctp_send_failed);
3077	}
3078	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3079	if (m_notify == NULL) {
3080		/* no space left */
3081		return;
3082	}
3083	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3084	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3085		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3086		memset(ssfe, 0, notifhdr_len);
3087		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3088		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3089		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3090		ssfe->ssfe_error = error;
3091		/* not exactly what the user sent in, but should be close :) */
3092		ssfe->ssfe_info.snd_sid = sp->sid;
3093		if (sp->some_taken) {
3094			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3095		} else {
3096			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3097		}
3098		ssfe->ssfe_info.snd_ppid = sp->ppid;
3099		ssfe->ssfe_info.snd_context = sp->context;
3100		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3101		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3102	} else {
3103		ssf = mtod(m_notify, struct sctp_send_failed *);
3104		memset(ssf, 0, notifhdr_len);
3105		ssf->ssf_type = SCTP_SEND_FAILED;
3106		ssf->ssf_flags = SCTP_DATA_UNSENT;
3107		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3108		ssf->ssf_error = error;
3109		/* not exactly what the user sent in, but should be close :) */
3110		ssf->ssf_info.sinfo_stream = sp->sid;
3111		ssf->ssf_info.sinfo_ssn = 0;
3112		if (sp->some_taken) {
3113			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3114		} else {
3115			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3116		}
3117		ssf->ssf_info.sinfo_ppid = sp->ppid;
3118		ssf->ssf_info.sinfo_context = sp->context;
3119		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3120		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3121	}
3122	SCTP_BUF_NEXT(m_notify) = sp->data;
3123
3124	/* Steal off the mbuf */
3125	sp->data = NULL;
3126	/*
3127	 * For this case, we check the actual socket buffer, since the assoc
3128	 * is going away: we don't want to overfill the socket buffer for a
3129	 * non-reader.
3130	 */
3131	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3132		sctp_m_freem(m_notify);
3133		return;
3134	}
3135	/* append to socket */
3136	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3137	    0, 0, stcb->asoc.context, 0, 0, 0,
3138	    m_notify);
3139	if (control == NULL) {
3140		/* no memory */
3141		sctp_m_freem(m_notify);
3142		return;
3143	}
3144	control->length = SCTP_BUF_LEN(m_notify);
3145	control->spec_flags = M_NOTIFICATION;
3146	/* not that we need this */
3147	control->tail_mbuf = m_notify;
3148	sctp_add_to_readq(stcb->sctp_ep, stcb,
3149	    control,
3150	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3151}
3152
3153
3154
3155static void
3156sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3157{
3158	struct mbuf *m_notify;
3159	struct sctp_adaptation_event *sai;
3160	struct sctp_queued_to_read *control;
3161
3162	if ((stcb == NULL) ||
3163	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3164		/* event not enabled */
3165		return;
3166	}
3167	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3168	if (m_notify == NULL)
3169		/* no space left */
3170		return;
3171	SCTP_BUF_LEN(m_notify) = 0;
3172	sai = mtod(m_notify, struct sctp_adaptation_event *);
3173	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3174	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3175	sai->sai_flags = 0;
3176	sai->sai_length = sizeof(struct sctp_adaptation_event);
3177	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3178	sai->sai_assoc_id = sctp_get_associd(stcb);
3179
3180	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3181	SCTP_BUF_NEXT(m_notify) = NULL;
3182
3183	/* append to socket */
3184	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3185	    0, 0, stcb->asoc.context, 0, 0, 0,
3186	    m_notify);
3187	if (control == NULL) {
3188		/* no memory */
3189		sctp_m_freem(m_notify);
3190		return;
3191	}
3192	control->length = SCTP_BUF_LEN(m_notify);
3193	control->spec_flags = M_NOTIFICATION;
3194	/* not that we need this */
3195	control->tail_mbuf = m_notify;
3196	sctp_add_to_readq(stcb->sctp_ep, stcb,
3197	    control,
3198	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3199}
3200
3201/* This always must be called with the read-queue LOCKED in the INP */
3202static void
3203sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3204    uint32_t val, int so_locked
3205#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3206    SCTP_UNUSED
3207#endif
3208)
3209{
3210	struct mbuf *m_notify;
3211	struct sctp_pdapi_event *pdapi;
3212	struct sctp_queued_to_read *control;
3213	struct sockbuf *sb;
3214
3215	if ((stcb == NULL) ||
3216	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3217		/* event not enabled */
3218		return;
3219	}
3220	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3221		return;
3222	}
3223	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3224	if (m_notify == NULL)
3225		/* no space left */
3226		return;
3227	SCTP_BUF_LEN(m_notify) = 0;
3228	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3229	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3230	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3231	pdapi->pdapi_flags = 0;
3232	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3233	pdapi->pdapi_indication = error;
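	/*
	 * val packs the stream number in the upper 16 bits and the stream
	 * sequence number in the lower 16 bits.
	 */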
3234	pdapi->pdapi_stream = (val >> 16);
3235	pdapi->pdapi_seq = (val & 0x0000ffff);
3236	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3237
3238	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3239	SCTP_BUF_NEXT(m_notify) = NULL;
3240	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3241	    0, 0, stcb->asoc.context, 0, 0, 0,
3242	    m_notify);
3243	if (control == NULL) {
3244		/* no memory */
3245		sctp_m_freem(m_notify);
3246		return;
3247	}
3248	control->length = SCTP_BUF_LEN(m_notify);
3249	control->spec_flags = M_NOTIFICATION;
3250	/* not that we need this */
3251	control->tail_mbuf = m_notify;
3252	sb = &stcb->sctp_socket->so_rcv;
3253	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3254		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3255	}
3256	sctp_sballoc(stcb, sb, m_notify);
3257	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3258		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3259	}
3260	control->end_added = 1;
3261	if (stcb->asoc.control_pdapi)
3262		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3263	else {
3264		/* we really should not see this case */
3265		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3266	}
3267	if (stcb->sctp_ep && stcb->sctp_socket) {
3268		/* This should always be the case */
3269#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3270		struct socket *so;
3271
3272		so = SCTP_INP_SO(stcb->sctp_ep);
3273		if (!so_locked) {
3274			atomic_add_int(&stcb->asoc.refcnt, 1);
3275			SCTP_TCB_UNLOCK(stcb);
3276			SCTP_SOCKET_LOCK(so, 1);
3277			SCTP_TCB_LOCK(stcb);
3278			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3279			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3280				SCTP_SOCKET_UNLOCK(so, 1);
3281				return;
3282			}
3283		}
3284#endif
3285		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3286#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3287		if (!so_locked) {
3288			SCTP_SOCKET_UNLOCK(so, 1);
3289		}
3290#endif
3291	}
3292}
3293
3294static void
3295sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3296{
3297	struct mbuf *m_notify;
3298	struct sctp_shutdown_event *sse;
3299	struct sctp_queued_to_read *control;
3300
3301	/*
3302	 * For TCP model AND UDP connected sockets, we will send an error up
3303	 * when a SHUTDOWN completes.
3304	 */
3305	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3306	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3307		/* mark socket closed for read/write and wakeup! */
3308#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3309		struct socket *so;
3310
3311		so = SCTP_INP_SO(stcb->sctp_ep);
3312		atomic_add_int(&stcb->asoc.refcnt, 1);
3313		SCTP_TCB_UNLOCK(stcb);
3314		SCTP_SOCKET_LOCK(so, 1);
3315		SCTP_TCB_LOCK(stcb);
3316		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3317		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3318			SCTP_SOCKET_UNLOCK(so, 1);
3319			return;
3320		}
3321#endif
3322		socantsendmore(stcb->sctp_socket);
3323#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3324		SCTP_SOCKET_UNLOCK(so, 1);
3325#endif
3326	}
3327	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3328		/* event not enabled */
3329		return;
3330	}
3331	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3332	if (m_notify == NULL)
3333		/* no space left */
3334		return;
3335	sse = mtod(m_notify, struct sctp_shutdown_event *);
3336	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3337	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3338	sse->sse_flags = 0;
3339	sse->sse_length = sizeof(struct sctp_shutdown_event);
3340	sse->sse_assoc_id = sctp_get_associd(stcb);
3341
3342	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3343	SCTP_BUF_NEXT(m_notify) = NULL;
3344
3345	/* append to socket */
3346	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3347	    0, 0, stcb->asoc.context, 0, 0, 0,
3348	    m_notify);
3349	if (control == NULL) {
3350		/* no memory */
3351		sctp_m_freem(m_notify);
3352		return;
3353	}
3354	control->length = SCTP_BUF_LEN(m_notify);
3355	control->spec_flags = M_NOTIFICATION;
3356	/* not that we need this */
3357	control->tail_mbuf = m_notify;
3358	sctp_add_to_readq(stcb->sctp_ep, stcb,
3359	    control,
3360	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3361}
3362
3363static void
3364sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3365    int so_locked
3366#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3367    SCTP_UNUSED
3368#endif
3369)
3370{
3371	struct mbuf *m_notify;
3372	struct sctp_sender_dry_event *event;
3373	struct sctp_queued_to_read *control;
3374
3375	if ((stcb == NULL) ||
3376	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3377		/* event not enabled */
3378		return;
3379	}
3380	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3381	if (m_notify == NULL) {
3382		/* no space left */
3383		return;
3384	}
3385	SCTP_BUF_LEN(m_notify) = 0;
3386	event = mtod(m_notify, struct sctp_sender_dry_event *);
3387	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3388	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3389	event->sender_dry_flags = 0;
3390	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3391	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3392
3393	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3394	SCTP_BUF_NEXT(m_notify) = NULL;
3395
3396	/* append to socket */
3397	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3398	    0, 0, stcb->asoc.context, 0, 0, 0,
3399	    m_notify);
3400	if (control == NULL) {
3401		/* no memory */
3402		sctp_m_freem(m_notify);
3403		return;
3404	}
3405	control->length = SCTP_BUF_LEN(m_notify);
3406	control->spec_flags = M_NOTIFICATION;
3407	/* not that we need this */
3408	control->tail_mbuf = m_notify;
3409	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3410	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3411}
3412
3413
3414void
3415sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3416{
3417	struct mbuf *m_notify;
3418	struct sctp_queued_to_read *control;
3419	struct sctp_stream_change_event *stradd;
3420
3421	if ((stcb == NULL) ||
3422	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3423		/* event not enabled */
3424		return;
3425	}
3426	if ((stcb->asoc.peer_req_out) && flag) {
3427		/* Peer made the request, don't tell the local user */
3428		stcb->asoc.peer_req_out = 0;
3429		return;
3430	}
3431	stcb->asoc.peer_req_out = 0;
3432	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3433	if (m_notify == NULL)
3434		/* no space left */
3435		return;
3436	SCTP_BUF_LEN(m_notify) = 0;
3437	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3438	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3439	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3440	stradd->strchange_flags = flag;
3441	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3442	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3443	stradd->strchange_instrms = numberin;
3444	stradd->strchange_outstrms = numberout;
3445	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3446	SCTP_BUF_NEXT(m_notify) = NULL;
3447	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3448		/* no space */
3449		sctp_m_freem(m_notify);
3450		return;
3451	}
3452	/* append to socket */
3453	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3454	    0, 0, stcb->asoc.context, 0, 0, 0,
3455	    m_notify);
3456	if (control == NULL) {
3457		/* no memory */
3458		sctp_m_freem(m_notify);
3459		return;
3460	}
3461	control->length = SCTP_BUF_LEN(m_notify);
3462	control->spec_flags = M_NOTIFICATION;
3463	/* not that we need this */
3464	control->tail_mbuf = m_notify;
3465	sctp_add_to_readq(stcb->sctp_ep, stcb,
3466	    control,
3467	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3468}
3469
3470void
3471sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3472{
3473	struct mbuf *m_notify;
3474	struct sctp_queued_to_read *control;
3475	struct sctp_assoc_reset_event *strasoc;
3476
3477	if ((stcb == NULL) ||
3478	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3479		/* event not enabled */
3480		return;
3481	}
3482	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3483	if (m_notify == NULL)
3484		/* no space left */
3485		return;
3486	SCTP_BUF_LEN(m_notify) = 0;
3487	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3488	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3489	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3490	strasoc->assocreset_flags = flag;
3491	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3492	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3493	strasoc->assocreset_local_tsn = sending_tsn;
3494	strasoc->assocreset_remote_tsn = recv_tsn;
3495	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3496	SCTP_BUF_NEXT(m_notify) = NULL;
3497	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3498		/* no space */
3499		sctp_m_freem(m_notify);
3500		return;
3501	}
3502	/* append to socket */
3503	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3504	    0, 0, stcb->asoc.context, 0, 0, 0,
3505	    m_notify);
3506	if (control == NULL) {
3507		/* no memory */
3508		sctp_m_freem(m_notify);
3509		return;
3510	}
3511	control->length = SCTP_BUF_LEN(m_notify);
3512	control->spec_flags = M_NOTIFICATION;
3513	/* not that we need this */
3514	control->tail_mbuf = m_notify;
3515	sctp_add_to_readq(stcb->sctp_ep, stcb,
3516	    control,
3517	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3518}
3519
3520
3521
3522static void
3523sctp_notify_stream_reset(struct sctp_tcb *stcb,
3524    int number_entries, uint16_t *list, int flag)
3525{
3526	struct mbuf *m_notify;
3527	struct sctp_queued_to_read *control;
3528	struct sctp_stream_reset_event *strreset;
3529	int len;
3530
3531	if ((stcb == NULL) ||
3532	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3533		/* event not enabled */
3534		return;
3535	}
3536	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3537	if (m_notify == NULL)
3538		/* no space left */
3539		return;
3540	SCTP_BUF_LEN(m_notify) = 0;
3541	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3542	if (len > M_TRAILINGSPACE(m_notify)) {
3543		/* never enough room */
3544		sctp_m_freem(m_notify);
3545		return;
3546	}
3547	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3548	memset(strreset, 0, len);
3549	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3550	strreset->strreset_flags = flag;
3551	strreset->strreset_length = len;
3552	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3553	if (number_entries) {
3554		int i;
3555
3556		for (i = 0; i < number_entries; i++) {
3557			strreset->strreset_stream_list[i] = ntohs(list[i]);
3558		}
3559	}
3560	SCTP_BUF_LEN(m_notify) = len;
3561	SCTP_BUF_NEXT(m_notify) = NULL;
3562	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3563		/* no space */
3564		sctp_m_freem(m_notify);
3565		return;
3566	}
3567	/* append to socket */
3568	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3569	    0, 0, stcb->asoc.context, 0, 0, 0,
3570	    m_notify);
3571	if (control == NULL) {
3572		/* no memory */
3573		sctp_m_freem(m_notify);
3574		return;
3575	}
3576	control->length = SCTP_BUF_LEN(m_notify);
3577	control->spec_flags = M_NOTIFICATION;
3578	/* not that we need this */
3579	control->tail_mbuf = m_notify;
3580	sctp_add_to_readq(stcb->sctp_ep, stcb,
3581	    control,
3582	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3583}
3584
3585
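/*
 * Queue an SCTP_REMOTE_ERROR notification for a received ERROR chunk.
 * The offending chunk, if supplied, is copied into the notification; if
 * the full-size mbuf allocation fails, a smaller notification without
 * the chunk data is delivered instead.
 */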
3586static void
3587sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3588{
3589	struct mbuf *m_notify;
3590	struct sctp_remote_error *sre;
3591	struct sctp_queued_to_read *control;
3592	unsigned int notif_len;
3593	uint16_t chunk_len;
3594
3595	if ((stcb == NULL) ||
3596	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3597		return;
3598	}
3599	if (chunk != NULL) {
3600		chunk_len = ntohs(chunk->ch.chunk_length);
3601	} else {
3602		chunk_len = 0;
3603	}
3604	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3605	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3606	if (m_notify == NULL) {
3607		/* Retry with smaller value. */
3608		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3609		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3610		if (m_notify == NULL) {
3611			return;
3612		}
3613	}
3614	SCTP_BUF_NEXT(m_notify) = NULL;
3615	sre = mtod(m_notify, struct sctp_remote_error *);
3616	memset(sre, 0, notif_len);
3617	sre->sre_type = SCTP_REMOTE_ERROR;
3618	sre->sre_flags = 0;
3619	sre->sre_length = sizeof(struct sctp_remote_error);
3620	sre->sre_error = error;
3621	sre->sre_assoc_id = sctp_get_associd(stcb);
3622	if (notif_len > sizeof(struct sctp_remote_error)) {
3623		memcpy(sre->sre_data, chunk, chunk_len);
3624		sre->sre_length += chunk_len;
3625	}
3626	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3627	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3628	    0, 0, stcb->asoc.context, 0, 0, 0,
3629	    m_notify);
3630	if (control != NULL) {
3631		control->length = SCTP_BUF_LEN(m_notify);
3632		control->spec_flags = M_NOTIFICATION;
3633		/* not that we need this */
3634		control->tail_mbuf = m_notify;
3635		sctp_add_to_readq(stcb->sctp_ep, stcb,
3636		    control,
3637		    &stcb->sctp_socket->so_rcv, 1,
3638		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3639	} else {
3640		sctp_m_freem(m_notify);
3641	}
3642}
3643
3644
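/*
 * Central dispatcher for ULP notifications: maps the internal
 * SCTP_NOTIFY_* codes onto the individual notification builders above.
 * Notifications are suppressed when the socket is gone or can no longer
 * receive, and interface events are not reported in the front states.
 */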
3645void
3646sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3647    uint32_t error, void *data, int so_locked
3648#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3649    SCTP_UNUSED
3650#endif
3651)
3652{
3653	if ((stcb == NULL) ||
3654	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3655	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3656	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3657		/* If the socket is gone we are out of here */
3658		return;
3659	}
3660	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3661		return;
3662	}
3663	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3664	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3665		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3666		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3667		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3668			/* Don't report these in front states */
3669			return;
3670		}
3671	}
3672	switch (notification) {
3673	case SCTP_NOTIFY_ASSOC_UP:
3674		if (stcb->asoc.assoc_up_sent == 0) {
3675			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3676			stcb->asoc.assoc_up_sent = 1;
3677		}
3678		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3679			sctp_notify_adaptation_layer(stcb);
3680		}
3681		if (stcb->asoc.auth_supported == 0) {
3682			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3683			    NULL, so_locked);
3684		}
3685		break;
3686	case SCTP_NOTIFY_ASSOC_DOWN:
3687		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3688		break;
3689	case SCTP_NOTIFY_INTERFACE_DOWN:
3690		{
3691			struct sctp_nets *net;
3692
3693			net = (struct sctp_nets *)data;
3694			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3695			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3696			break;
3697		}
3698	case SCTP_NOTIFY_INTERFACE_UP:
3699		{
3700			struct sctp_nets *net;
3701
3702			net = (struct sctp_nets *)data;
3703			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3704			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3705			break;
3706		}
3707	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3708		{
3709			struct sctp_nets *net;
3710
3711			net = (struct sctp_nets *)data;
3712			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3713			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3714			break;
3715		}
3716	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3717		sctp_notify_send_failed2(stcb, error,
3718		    (struct sctp_stream_queue_pending *)data, so_locked);
3719		break;
3720	case SCTP_NOTIFY_SENT_DG_FAIL:
3721		sctp_notify_send_failed(stcb, 1, error,
3722		    (struct sctp_tmit_chunk *)data, so_locked);
3723		break;
3724	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3725		sctp_notify_send_failed(stcb, 0, error,
3726		    (struct sctp_tmit_chunk *)data, so_locked);
3727		break;
3728	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3729		{
3730			uint32_t val;
3731
3732			val = *((uint32_t *)data);
3733
3734			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3735			break;
3736		}
3737	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3738		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3739		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3740			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3741		} else {
3742			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3743		}
3744		break;
3745	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3746		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3747		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3748			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3749		} else {
3750			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3751		}
3752		break;
3753	case SCTP_NOTIFY_ASSOC_RESTART:
3754		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3755		if (stcb->asoc.auth_supported == 0) {
3756			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3757			    NULL, so_locked);
3758		}
3759		break;
3760	case SCTP_NOTIFY_STR_RESET_SEND:
3761		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3762		break;
3763	case SCTP_NOTIFY_STR_RESET_RECV:
3764		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3765		break;
3766	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3767		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3768		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3769		break;
3770	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3771		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3772		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3773		break;
3774	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3775		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3776		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3777		break;
3778	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3779		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3780		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3781		break;
3782	case SCTP_NOTIFY_ASCONF_ADD_IP:
3783		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3784		    error, so_locked);
3785		break;
3786	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3787		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3788		    error, so_locked);
3789		break;
3790	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3791		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3792		    error, so_locked);
3793		break;
3794	case SCTP_NOTIFY_PEER_SHUTDOWN:
3795		sctp_notify_shutdown_event(stcb);
3796		break;
3797	case SCTP_NOTIFY_AUTH_NEW_KEY:
3798		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3799		    (uint16_t)(uintptr_t)data,
3800		    so_locked);
3801		break;
3802	case SCTP_NOTIFY_AUTH_FREE_KEY:
3803		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3804		    (uint16_t)(uintptr_t)data,
3805		    so_locked);
3806		break;
3807	case SCTP_NOTIFY_NO_PEER_AUTH:
3808		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3809		    (uint16_t)(uintptr_t)data,
3810		    so_locked);
3811		break;
3812	case SCTP_NOTIFY_SENDER_DRY:
3813		sctp_notify_sender_dry_event(stcb, so_locked);
3814		break;
3815	case SCTP_NOTIFY_REMOTE_ERROR:
3816		sctp_notify_remote_error(stcb, error, data);
3817		break;
3818	default:
3819		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3820		    __func__, notification, notification);
3821		break;
3822	}			/* end switch */
3823}
3824
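/*
 * Report all queued outbound data as failed to the ULP: walk the sent
 * queue, the send queue, and every stream's output queue, issuing
 * SENT/UNSENT_DG_FAIL (or SPECIAL_SP_FAIL) notifications and freeing the
 * chunks and their mbufs.  The send lock is taken unless the caller
 * already holds it.
 */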
3825void
3826sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3827#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3828    SCTP_UNUSED
3829#endif
3830)
3831{
3832	struct sctp_association *asoc;
3833	struct sctp_stream_out *outs;
3834	struct sctp_tmit_chunk *chk, *nchk;
3835	struct sctp_stream_queue_pending *sp, *nsp;
3836	int i;
3837
3838	if (stcb == NULL) {
3839		return;
3840	}
3841	asoc = &stcb->asoc;
3842	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3843		/* already being freed */
3844		return;
3845	}
3846	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3847	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3848	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3849		return;
3850	}
3851	/* now go through all the gunk, freeing chunks */
3852	if (holds_lock == 0) {
3853		SCTP_TCB_SEND_LOCK(stcb);
3854	}
3855	/* sent queue SHOULD be empty */
3856	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3857		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3858		asoc->sent_queue_cnt--;
3859		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3860			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3861				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3862#ifdef INVARIANTS
3863			} else {
3864				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3865#endif
3866			}
3867		}
3868		if (chk->data != NULL) {
3869			sctp_free_bufspace(stcb, asoc, chk, 1);
3870			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3871			    error, chk, so_locked);
3872			if (chk->data) {
3873				sctp_m_freem(chk->data);
3874				chk->data = NULL;
3875			}
3876		}
3877		sctp_free_a_chunk(stcb, chk, so_locked);
3878		/* sa_ignore FREED_MEMORY */
3879	}
3880	/* pending send queue SHOULD be empty */
3881	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3882		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3883		asoc->send_queue_cnt--;
3884		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3885			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3886#ifdef INVARIANTS
3887		} else {
3888			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3889#endif
3890		}
3891		if (chk->data != NULL) {
3892			sctp_free_bufspace(stcb, asoc, chk, 1);
3893			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3894			    error, chk, so_locked);
3895			if (chk->data) {
3896				sctp_m_freem(chk->data);
3897				chk->data = NULL;
3898			}
3899		}
3900		sctp_free_a_chunk(stcb, chk, so_locked);
3901		/* sa_ignore FREED_MEMORY */
3902	}
3903	for (i = 0; i < asoc->streamoutcnt; i++) {
3904		/* For each stream */
3905		outs = &asoc->strmout[i];
3906		/* clean up any sends there */
3907		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3908			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3909			TAILQ_REMOVE(&outs->outqueue, sp, next);
3910			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3911			sctp_free_spbufspace(stcb, asoc, sp);
3912			if (sp->data) {
3913				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3914				    error, (void *)sp, so_locked);
3915				if (sp->data) {
3916					sctp_m_freem(sp->data);
3917					sp->data = NULL;
3918					sp->tail_mbuf = NULL;
3919					sp->length = 0;
3920				}
3921			}
3922			if (sp->net) {
3923				sctp_free_remote_addr(sp->net);
3924				sp->net = NULL;
3925			}
3926			/* Free the chunk */
3927			sctp_free_a_strmoq(stcb, sp, so_locked);
3928			/* sa_ignore FREED_MEMORY */
3929		}
3930	}
3931
3932	if (holds_lock == 0) {
3933		SCTP_TCB_SEND_UNLOCK(stcb);
3934	}
3935}
3936
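/*
 * Notify the ULP that the association has been aborted: report all
 * outbound data as failed, then deliver a remote- or local-abort
 * association change notification depending on from_peer.
 */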
3937void
3938sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3939    struct sctp_abort_chunk *abort, int so_locked
3940#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3941    SCTP_UNUSED
3942#endif
3943)
3944{
3945	if (stcb == NULL) {
3946		return;
3947	}
3948	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3949	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3950	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3951		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3952	}
3953	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3954	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3955	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3956		return;
3957	}
3958	/* Tell them we lost the asoc */
3959	sctp_report_all_outbound(stcb, error, 1, so_locked);
3960	if (from_peer) {
3961		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3962	} else {
3963		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3964	}
3965}
3966
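/*
 * Send an ABORT in response to the given packet and, if a TCB exists,
 * notify the ULP, mark the association as aborted, update the abort
 * statistics, and free the association.
 */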
3967void
3968sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3969    struct mbuf *m, int iphlen,
3970    struct sockaddr *src, struct sockaddr *dst,
3971    struct sctphdr *sh, struct mbuf *op_err,
3972    uint8_t mflowtype, uint32_t mflowid,
3973    uint32_t vrf_id, uint16_t port)
3974{
3975	uint32_t vtag;
3976#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3977	struct socket *so;
3978#endif
3979
3980	vtag = 0;
3981	if (stcb != NULL) {
3982		vtag = stcb->asoc.peer_vtag;
3983		vrf_id = stcb->asoc.vrf_id;
3984	}
3985	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3986	    mflowtype, mflowid, inp->fibnum,
3987	    vrf_id, port);
3988	if (stcb != NULL) {
3989		/* We have a TCB to abort, send notification too */
3990		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3991		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3992		/* Ok, now lets free it */
3993#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3994		so = SCTP_INP_SO(inp);
3995		atomic_add_int(&stcb->asoc.refcnt, 1);
3996		SCTP_TCB_UNLOCK(stcb);
3997		SCTP_SOCKET_LOCK(so, 1);
3998		SCTP_TCB_LOCK(stcb);
3999		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4000#endif
4001		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4002		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4003		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4004			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4005		}
4006		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4007		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4008#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4009		SCTP_SOCKET_UNLOCK(so, 1);
4010#endif
4011	}
4012}
4013#ifdef SCTP_ASOCLOG_OF_TSNS
4014void
4015sctp_print_out_track_log(struct sctp_tcb *stcb)
4016{
4017#ifdef NOSIY_PRINTS
4018	int i;
4019
4020	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4021	SCTP_PRINTF("IN bound TSN log-aaa\n");
4022	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4023		SCTP_PRINTF("None rcvd\n");
4024		goto none_in;
4025	}
4026	if (stcb->asoc.tsn_in_wrapped) {
4027		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4028			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4029			    stcb->asoc.in_tsnlog[i].tsn,
4030			    stcb->asoc.in_tsnlog[i].strm,
4031			    stcb->asoc.in_tsnlog[i].seq,
4032			    stcb->asoc.in_tsnlog[i].flgs,
4033			    stcb->asoc.in_tsnlog[i].sz);
4034		}
4035	}
4036	if (stcb->asoc.tsn_in_at) {
4037		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4038			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4039			    stcb->asoc.in_tsnlog[i].tsn,
4040			    stcb->asoc.in_tsnlog[i].strm,
4041			    stcb->asoc.in_tsnlog[i].seq,
4042			    stcb->asoc.in_tsnlog[i].flgs,
4043			    stcb->asoc.in_tsnlog[i].sz);
4044		}
4045	}
4046none_in:
4047	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4048	if ((stcb->asoc.tsn_out_at == 0) &&
4049	    (stcb->asoc.tsn_out_wrapped == 0)) {
4050		SCTP_PRINTF("None sent\n");
4051	}
4052	if (stcb->asoc.tsn_out_wrapped) {
4053		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4054			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4055			    stcb->asoc.out_tsnlog[i].tsn,
4056			    stcb->asoc.out_tsnlog[i].strm,
4057			    stcb->asoc.out_tsnlog[i].seq,
4058			    stcb->asoc.out_tsnlog[i].flgs,
4059			    stcb->asoc.out_tsnlog[i].sz);
4060		}
4061	}
4062	if (stcb->asoc.tsn_out_at) {
4063		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4064			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4065			    stcb->asoc.out_tsnlog[i].tsn,
4066			    stcb->asoc.out_tsnlog[i].strm,
4067			    stcb->asoc.out_tsnlog[i].seq,
4068			    stcb->asoc.out_tsnlog[i].flgs,
4069			    stcb->asoc.out_tsnlog[i].sz);
4070		}
4071	}
4072#endif
4073}
4074#endif
4075
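/*
 * Abort an existing association: send an ABORT chunk (carrying op_err,
 * if any) to the peer, notify the ULP unless the socket is already
 * gone, and free the association.  With no TCB, only the endpoint is
 * cleaned up, and only if its socket is already gone.
 */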
4076void
4077sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4078    struct mbuf *op_err,
4079    int so_locked
4080#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4081    SCTP_UNUSED
4082#endif
4083)
4084{
4085#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4086	struct socket *so;
4087#endif
4088
4089#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4090	so = SCTP_INP_SO(inp);
4091#endif
4092	if (stcb == NULL) {
4093		/* Got to have a TCB */
4094		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4095			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4096				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4097				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4098			}
4099		}
4100		return;
4101	} else {
4102		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4103	}
4104	/* notify the peer */
4105	sctp_send_abort_tcb(stcb, op_err, so_locked);
4106	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4107	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4108	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4109		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4110	}
4111	/* notify the ulp */
4112	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4113		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4114	}
4115	/* now free the asoc */
4116#ifdef SCTP_ASOCLOG_OF_TSNS
4117	sctp_print_out_track_log(stcb);
4118#endif
4119#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4120	if (!so_locked) {
4121		atomic_add_int(&stcb->asoc.refcnt, 1);
4122		SCTP_TCB_UNLOCK(stcb);
4123		SCTP_SOCKET_LOCK(so, 1);
4124		SCTP_TCB_LOCK(stcb);
4125		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4126	}
4127#endif
4128	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4129	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4130#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4131	if (!so_locked) {
4132		SCTP_SOCKET_UNLOCK(so, 1);
4133	}
4134#endif
4135}
4136
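/*
 * Handle an "out of the blue" packet, i.e. one that matches no existing
 * association.  Depending on the chunk types found it is ignored
 * (ABORT, SHUTDOWN COMPLETE, packet-dropped), answered with a SHUTDOWN
 * COMPLETE (for SHUTDOWN ACK), or answered with an ABORT, subject to
 * the sctp_blackhole sysctl.
 */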
4137void
4138sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4139    struct sockaddr *src, struct sockaddr *dst,
4140    struct sctphdr *sh, struct sctp_inpcb *inp,
4141    struct mbuf *cause,
4142    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4143    uint32_t vrf_id, uint16_t port)
4144{
4145	struct sctp_chunkhdr *ch, chunk_buf;
4146	unsigned int chk_length;
4147	int contains_init_chunk;
4148
4149	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4150	/* Generate a TO address for future reference */
4151	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4152		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4153			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4154			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4155		}
4156	}
4157	contains_init_chunk = 0;
4158	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4159	    sizeof(*ch), (uint8_t *)&chunk_buf);
4160	while (ch != NULL) {
4161		chk_length = ntohs(ch->chunk_length);
4162		if (chk_length < sizeof(*ch)) {
4163			/* break to abort land */
4164			break;
4165		}
4166		switch (ch->chunk_type) {
4167		case SCTP_INIT:
4168			contains_init_chunk = 1;
4169			break;
4170		case SCTP_PACKET_DROPPED:
4171			/* we don't respond to pkt-dropped */
4172			return;
4173		case SCTP_ABORT_ASSOCIATION:
4174			/* we don't respond with an ABORT to an ABORT */
4175			return;
4176		case SCTP_SHUTDOWN_COMPLETE:
4177			/*
4178			 * we ignore it since we are not waiting for it and
4179			 * peer is gone
4180			 */
4181			return;
4182		case SCTP_SHUTDOWN_ACK:
4183			sctp_send_shutdown_complete2(src, dst, sh,
4184			    mflowtype, mflowid, fibnum,
4185			    vrf_id, port);
4186			return;
4187		default:
4188			break;
4189		}
4190		offset += SCTP_SIZE32(chk_length);
4191		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4192		    sizeof(*ch), (uint8_t *)&chunk_buf);
4193	}
4194	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4195	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4196	    (contains_init_chunk == 0))) {
4197		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4198		    mflowtype, mflowid, fibnum,
4199		    vrf_id, port);
4200	}
4201}
4202
4203/*
4204 * check the inbound datagram to make sure there is not an abort inside it,
4205 * if there is return 1, else return 0.
4206 */
4207int
4208sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4209{
4210	struct sctp_chunkhdr *ch;
4211	struct sctp_init_chunk *init_chk, chunk_buf;
4212	int offset;
4213	unsigned int chk_length;
4214
4215	offset = iphlen + sizeof(struct sctphdr);
4216	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4217	    (uint8_t *)&chunk_buf);
4218	while (ch != NULL) {
4219		chk_length = ntohs(ch->chunk_length);
4220		if (chk_length < sizeof(*ch)) {
4221			/* packet is probably corrupt */
4222			break;
4223		}
4224		/* we seem to be ok, is it an abort? */
4225		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4226			/* yep, tell them */
4227			return (1);
4228		}
4229		if (ch->chunk_type == SCTP_INITIATION) {
4230			/* need to update the Vtag */
4231			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4232			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4233			if (init_chk != NULL) {
4234				*vtagfill = ntohl(init_chk->init.initiate_tag);
4235			}
4236		}
4237		/* Nope, move to the next chunk */
4238		offset += SCTP_SIZE32(chk_length);
4239		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4240		    sizeof(*ch), (uint8_t *)&chunk_buf);
4241	}
4242	return (0);
4243}
4244
4245/*
4246 * currently (2/02), ifa_addr embeds scope_ids and doesn't have sin6_scope_id
4247 * set (i.e. it's 0), so create this function to compare link-local scopes
4248 */
4249#ifdef INET6
4250uint32_t
4251sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4252{
4253	struct sockaddr_in6 a, b;
4254
4255	/* save copies */
4256	a = *addr1;
4257	b = *addr2;
4258
4259	if (a.sin6_scope_id == 0)
4260		if (sa6_recoverscope(&a)) {
4261			/* can't get scope, so can't match */
4262			return (0);
4263		}
4264	if (b.sin6_scope_id == 0)
4265		if (sa6_recoverscope(&b)) {
4266			/* can't get scope, so can't match */
4267			return (0);
4268		}
4269	if (a.sin6_scope_id != b.sin6_scope_id)
4270		return (0);
4271
4272	return (1);
4273}
4274
4275/*
4276 * returns a sockaddr_in6 with embedded scope recovered and removed
4277 */
4278struct sockaddr_in6 *
4279sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4280{
4281	/* check and strip embedded scope junk */
4282	if (addr->sin6_family == AF_INET6) {
4283		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4284			if (addr->sin6_scope_id == 0) {
4285				*store = *addr;
4286				if (!sa6_recoverscope(store)) {
4287					/* use the recovered scope */
4288					addr = store;
4289				}
4290			} else {
4291				/* else, return the original "to" addr */
4292				in6_clearscope(&addr->sin6_addr);
4293			}
4294		}
4295	}
4296	return (addr);
4297}
4298#endif
4299
4300/*
4301 * Are the two addresses the same?  Currently a "scopeless" check.
4302 * Returns 1 if same, 0 if not.
4303 */
4304int
4305sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4306{
4307
4308	/* must be valid */
4309	if (sa1 == NULL || sa2 == NULL)
4310		return (0);
4311
4312	/* must be the same family */
4313	if (sa1->sa_family != sa2->sa_family)
4314		return (0);
4315
4316	switch (sa1->sa_family) {
4317#ifdef INET6
4318	case AF_INET6:
4319		{
4320			/* IPv6 addresses */
4321			struct sockaddr_in6 *sin6_1, *sin6_2;
4322
4323			sin6_1 = (struct sockaddr_in6 *)sa1;
4324			sin6_2 = (struct sockaddr_in6 *)sa2;
4325			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4326			    sin6_2));
4327		}
4328#endif
4329#ifdef INET
4330	case AF_INET:
4331		{
4332			/* IPv4 addresses */
4333			struct sockaddr_in *sin_1, *sin_2;
4334
4335			sin_1 = (struct sockaddr_in *)sa1;
4336			sin_2 = (struct sockaddr_in *)sa2;
4337			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4338		}
4339#endif
4340	default:
4341		/* we don't do these... */
4342		return (0);
4343	}
4344}
4345
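/*
 * Print an IPv4 or IPv6 address (with port) for debugging output.
 */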
4346void
4347sctp_print_address(struct sockaddr *sa)
4348{
4349#ifdef INET6
4350	char ip6buf[INET6_ADDRSTRLEN];
4351#endif
4352
4353	switch (sa->sa_family) {
4354#ifdef INET6
4355	case AF_INET6:
4356		{
4357			struct sockaddr_in6 *sin6;
4358
4359			sin6 = (struct sockaddr_in6 *)sa;
4360			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4361			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4362			    ntohs(sin6->sin6_port),
4363			    sin6->sin6_scope_id);
4364			break;
4365		}
4366#endif
4367#ifdef INET
4368	case AF_INET:
4369		{
4370			struct sockaddr_in *sin;
4371			unsigned char *p;
4372
4373			sin = (struct sockaddr_in *)sa;
4374			p = (unsigned char *)&sin->sin_addr;
4375			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4376			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4377			break;
4378		}
4379#endif
4380	default:
4381		SCTP_PRINTF("?\n");
4382		break;
4383	}
4384}
4385
4386void
4387sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4388    struct sctp_inpcb *new_inp,
4389    struct sctp_tcb *stcb,
4390    int waitflags)
4391{
4392	/*
4393	 * go through our old INP and pull off any control structures that
4394	 * belong to stcb and move them to the new inp.
4395	 */
4396	struct socket *old_so, *new_so;
4397	struct sctp_queued_to_read *control, *nctl;
4398	struct sctp_readhead tmp_queue;
4399	struct mbuf *m;
4400	int error = 0;
4401
4402	old_so = old_inp->sctp_socket;
4403	new_so = new_inp->sctp_socket;
4404	TAILQ_INIT(&tmp_queue);
4405	error = sblock(&old_so->so_rcv, waitflags);
4406	if (error) {
4407		/*
4408		 * Gak, can't get sblock, we have a problem. Data will be
4409		 * left stranded, and we don't dare look at it since the
4410		 * other thread may be reading something. Oh well, it's a
4411		 * screwed up app that does a peeloff OR an accept while
4412		 * reading from the main socket... actually it's only the
4413		 * peeloff() case, since I think a read will fail on a
4414		 * listening socket.
4415		 */
4416		return;
4417	}
4418	/* lock the socket buffers */
4419	SCTP_INP_READ_LOCK(old_inp);
4420	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4421		/* Pull off all for our target stcb */
4422		if (control->stcb == stcb) {
4423			/* remove it we want it */
4424			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4425			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4426			m = control->data;
4427			while (m) {
4428				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4429					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4430				}
4431				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4432				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4433					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4434				}
4435				m = SCTP_BUF_NEXT(m);
4436			}
4437		}
4438	}
4439	SCTP_INP_READ_UNLOCK(old_inp);
4440	/* Remove the sb-lock on the old socket */
4441
4442	sbunlock(&old_so->so_rcv);
4443	/* Now we move them over to the new socket buffer */
4444	SCTP_INP_READ_LOCK(new_inp);
4445	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4446		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4447		m = control->data;
4448		while (m) {
4449			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4450				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4451			}
4452			sctp_sballoc(stcb, &new_so->so_rcv, m);
4453			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4454				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4455			}
4456			m = SCTP_BUF_NEXT(m);
4457		}
4458	}
4459	SCTP_INP_READ_UNLOCK(new_inp);
4460}
4461
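/*
 * Wake up a reader blocked on the socket receive buffer, either through
 * the zero-copy event mechanism or a plain sorwakeup(), handling the
 * socket locking dance on platforms that need it.
 */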
4462void
4463sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4464    struct sctp_tcb *stcb,
4465    int so_locked
4466#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4467    SCTP_UNUSED
4468#endif
4469)
4470{
4471	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4472		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4473			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4474		} else {
4475#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4476			struct socket *so;
4477
4478			so = SCTP_INP_SO(inp);
4479			if (!so_locked) {
4480				if (stcb) {
4481					atomic_add_int(&stcb->asoc.refcnt, 1);
4482					SCTP_TCB_UNLOCK(stcb);
4483				}
4484				SCTP_SOCKET_LOCK(so, 1);
4485				if (stcb) {
4486					SCTP_TCB_LOCK(stcb);
4487					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4488				}
4489				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4490					SCTP_SOCKET_UNLOCK(so, 1);
4491					return;
4492				}
4493			}
4494#endif
4495			sctp_sorwakeup(inp, inp->sctp_socket);
4496#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4497			if (!so_locked) {
4498				SCTP_SOCKET_UNLOCK(so, 1);
4499			}
4500#endif
4501		}
4502	}
4503}
4504
4505void
4506sctp_add_to_readq(struct sctp_inpcb *inp,
4507    struct sctp_tcb *stcb,
4508    struct sctp_queued_to_read *control,
4509    struct sockbuf *sb,
4510    int end,
4511    int inp_read_lock_held,
4512    int so_locked
4513#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4514    SCTP_UNUSED
4515#endif
4516)
4517{
4518	/*
4519	 * Here we must place the control on the end of the socket read
4520	 * queue AND increment sb_cc so that select will work properly on
4521	 * read.
4522	 */
4523	struct mbuf *m, *prev = NULL;
4524
4525	if (inp == NULL) {
4526		/* Gak, TSNH!! */
4527#ifdef INVARIANTS
4528		panic("Gak, inp NULL on add_to_readq");
4529#endif
4530		return;
4531	}
4532	if (inp_read_lock_held == 0)
4533		SCTP_INP_READ_LOCK(inp);
4534	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4535		sctp_free_remote_addr(control->whoFrom);
4536		if (control->data) {
4537			sctp_m_freem(control->data);
4538			control->data = NULL;
4539		}
4540		sctp_free_a_readq(stcb, control);
4541		if (inp_read_lock_held == 0)
4542			SCTP_INP_READ_UNLOCK(inp);
4543		return;
4544	}
4545	if (!(control->spec_flags & M_NOTIFICATION)) {
4546		atomic_add_int(&inp->total_recvs, 1);
4547		if (!control->do_not_ref_stcb) {
4548			atomic_add_int(&stcb->total_recvs, 1);
4549		}
4550	}
4551	m = control->data;
4552	control->held_length = 0;
4553	control->length = 0;
4554	while (m) {
4555		if (SCTP_BUF_LEN(m) == 0) {
4556			/* Skip mbufs with NO length */
4557			if (prev == NULL) {
4558				/* First one */
4559				control->data = sctp_m_free(m);
4560				m = control->data;
4561			} else {
4562				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4563				m = SCTP_BUF_NEXT(prev);
4564			}
4565			if (m == NULL) {
4566				control->tail_mbuf = prev;
4567			}
4568			continue;
4569		}
4570		prev = m;
4571		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4572			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4573		}
4574		sctp_sballoc(stcb, sb, m);
4575		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4576			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4577		}
4578		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4579		m = SCTP_BUF_NEXT(m);
4580	}
4581	if (prev != NULL) {
4582		control->tail_mbuf = prev;
4583	} else {
4584		/* Everything got collapsed out?? */
4585		sctp_free_remote_addr(control->whoFrom);
4586		sctp_free_a_readq(stcb, control);
4587		if (inp_read_lock_held == 0)
4588			SCTP_INP_READ_UNLOCK(inp);
4589		return;
4590	}
4591	if (end) {
4592		control->end_added = 1;
4593	}
4594	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4595	control->on_read_q = 1;
4596	if (inp_read_lock_held == 0)
4597		SCTP_INP_READ_UNLOCK(inp);
4598	if (inp && inp->sctp_socket) {
4599		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4600	}
4601}
4602
4603/*************HOLD THIS COMMENT FOR PATCH FILE OF
4604 *************ALTERNATE ROUTING CODE
4605 */
4606
4607/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4608 *************ALTERNATE ROUTING CODE
4609 */
4610
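/*
 * Build an error cause mbuf with the given cause code and the string
 * info as its payload.  Returns NULL if the code is 0, info is missing
 * or too long, or no mbuf is available.
 */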
4611struct mbuf *
4612sctp_generate_cause(uint16_t code, char *info)
4613{
4614	struct mbuf *m;
4615	struct sctp_gen_error_cause *cause;
4616	size_t info_len;
4617	uint16_t len;
4618
4619	if ((code == 0) || (info == NULL)) {
4620		return (NULL);
4621	}
4622	info_len = strlen(info);
4623	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4624		return (NULL);
4625	}
4626	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4627	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4628	if (m != NULL) {
4629		SCTP_BUF_LEN(m) = len;
4630		cause = mtod(m, struct sctp_gen_error_cause *);
4631		cause->code = htons(code);
4632		cause->length = htons(len);
4633		memcpy(cause->info, info, info_len);
4634	}
4635	return (m);
4636}
4637
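/*
 * Build a "no user data" error cause mbuf referencing the given TSN.
 */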
4638struct mbuf *
4639sctp_generate_no_user_data_cause(uint32_t tsn)
4640{
4641	struct mbuf *m;
4642	struct sctp_error_no_user_data *no_user_data_cause;
4643	uint16_t len;
4644
4645	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4646	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4647	if (m != NULL) {
4648		SCTP_BUF_LEN(m) = len;
4649		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4650		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4651		no_user_data_cause->cause.length = htons(len);
4652		no_user_data_cause->tsn = htonl(tsn);
4653	}
4654	return (m);
4655}
4656
4657#ifdef SCTP_MBCNT_LOGGING
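/*
 * Release the output-queue space booked for a chunk being freed and log
 * the change; this variant is only compiled when SCTP_MBCNT_LOGGING is
 * defined.
 */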
4658void
4659sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4660    struct sctp_tmit_chunk *tp1, int chk_cnt)
4661{
4662	if (tp1->data == NULL) {
4663		return;
4664	}
4665	asoc->chunks_on_out_queue -= chk_cnt;
4666	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4667		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4668		    asoc->total_output_queue_size,
4669		    tp1->book_size,
4670		    0,
4671		    tp1->mbcnt);
4672	}
4673	if (asoc->total_output_queue_size >= tp1->book_size) {
4674		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4675	} else {
4676		asoc->total_output_queue_size = 0;
4677	}
4678
4679	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4680	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4681		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4682			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4683		} else {
4684			stcb->sctp_socket->so_snd.sb_cc = 0;
4685
4686		}
4687	}
4688}
4689
4690#endif
4691
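/*
 * Abandon a PR-SCTP message: mark every fragment of the message that
 * tp1 belongs to as SCTP_FORWARD_TSN_SKIP across the sent, send, and
 * stream output queues, notify the ULP of the failed data, update the
 * abandoned counters, and return the amount of buffer space released.
 */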
4692int
4693sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4694    uint8_t sent, int so_locked
4695#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4696    SCTP_UNUSED
4697#endif
4698)
4699{
4700	struct sctp_stream_out *strq;
4701	struct sctp_tmit_chunk *chk = NULL, *tp2;
4702	struct sctp_stream_queue_pending *sp;
4703	uint32_t mid;
4704	uint16_t sid;
4705	uint8_t foundeom = 0;
4706	int ret_sz = 0;
4707	int notdone;
4708	int do_wakeup_routine = 0;
4709
4710	sid = tp1->rec.data.sid;
4711	mid = tp1->rec.data.mid;
4712	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4713		stcb->asoc.abandoned_sent[0]++;
4714		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4715		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4716#if defined(SCTP_DETAILED_STR_STATS)
4717		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4718#endif
4719	} else {
4720		stcb->asoc.abandoned_unsent[0]++;
4721		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4722		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4723#if defined(SCTP_DETAILED_STR_STATS)
4724		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4725#endif
4726	}
4727	do {
4728		ret_sz += tp1->book_size;
4729		if (tp1->data != NULL) {
4730			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4731				sctp_flight_size_decrease(tp1);
4732				sctp_total_flight_decrease(stcb, tp1);
4733			}
4734			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4735			stcb->asoc.peers_rwnd += tp1->send_size;
4736			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4737			if (sent) {
4738				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4739			} else {
4740				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4741			}
4742			if (tp1->data) {
4743				sctp_m_freem(tp1->data);
4744				tp1->data = NULL;
4745			}
4746			do_wakeup_routine = 1;
4747			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4748				stcb->asoc.sent_queue_cnt_removeable--;
4749			}
4750		}
4751		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4752		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4753		    SCTP_DATA_NOT_FRAG) {
4754			/* not frag'ed, we are done */
4755			notdone = 0;
4756			foundeom = 1;
4757		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4758			/* end of frag, we are done */
4759			notdone = 0;
4760			foundeom = 1;
4761		} else {
4762			/*
4763			 * It's a begin or middle piece, we must mark all of
4764			 * it
4765			 */
4766			notdone = 1;
4767			tp1 = TAILQ_NEXT(tp1, sctp_next);
4768		}
4769	} while (tp1 && notdone);
4770	if (foundeom == 0) {
4771		/*
4772		 * The multi-part message was scattered across the send and
4773		 * sent queues.
4774		 */
4775		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4776			if ((tp1->rec.data.sid != sid) ||
4777			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4778				break;
4779			}
4780			/*
4781			 * Save to chk in case we have some on the stream out
4782			 * queue. If so, and we have an un-transmitted one, we
4783			 * don't have to fudge the TSN.
4784			 */
4785			chk = tp1;
4786			ret_sz += tp1->book_size;
4787			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4788			if (sent) {
4789				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4790			} else {
4791				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4792			}
4793			if (tp1->data) {
4794				sctp_m_freem(tp1->data);
4795				tp1->data = NULL;
4796			}
4797			/* No flight involved here, book the size to 0 */
4798			tp1->book_size = 0;
4799			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4800				foundeom = 1;
4801			}
4802			do_wakeup_routine = 1;
4803			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4804			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4805			/*
4806			 * on to the sent queue so we can wait for it to be
4807			 * passed by.
4808			 */
4809			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4810			    sctp_next);
4811			stcb->asoc.send_queue_cnt--;
4812			stcb->asoc.sent_queue_cnt++;
4813		}
4814	}
4815	if (foundeom == 0) {
4816		/*
4817		 * Still no eom found. That means there is stuff left on the
4818		 * stream out queue.. yuck.
4819		 */
4820		SCTP_TCB_SEND_LOCK(stcb);
4821		strq = &stcb->asoc.strmout[sid];
4822		sp = TAILQ_FIRST(&strq->outqueue);
4823		if (sp != NULL) {
4824			sp->discard_rest = 1;
4825			/*
4826			 * We may need to put a chunk on the queue that
4827			 * holds the TSN that would have been sent with the
4828			 * LAST bit.
4829			 */
4830			if (chk == NULL) {
4831				/* Yep, we have to */
4832				sctp_alloc_a_chunk(stcb, chk);
4833				if (chk == NULL) {
4834					/*
4835					 * we are hosed. All we can do is
4836					 * nothing.. which will cause an
4837					 * abort if the peer is paying
4838					 * attention.
4839					 */
4840					goto oh_well;
4841				}
4842				memset(chk, 0, sizeof(*chk));
4843				chk->rec.data.rcv_flags = 0;
4844				chk->sent = SCTP_FORWARD_TSN_SKIP;
4845				chk->asoc = &stcb->asoc;
4846				if (stcb->asoc.idata_supported == 0) {
4847					if (sp->sinfo_flags & SCTP_UNORDERED) {
4848						chk->rec.data.mid = 0;
4849					} else {
4850						chk->rec.data.mid = strq->next_mid_ordered;
4851					}
4852				} else {
4853					if (sp->sinfo_flags & SCTP_UNORDERED) {
4854						chk->rec.data.mid = strq->next_mid_unordered;
4855					} else {
4856						chk->rec.data.mid = strq->next_mid_ordered;
4857					}
4858				}
4859				chk->rec.data.sid = sp->sid;
4860				chk->rec.data.ppid = sp->ppid;
4861				chk->rec.data.context = sp->context;
4862				chk->flags = sp->act_flags;
4863				chk->whoTo = NULL;
4864				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4865				strq->chunks_on_queues++;
4866				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4867				stcb->asoc.sent_queue_cnt++;
4868				stcb->asoc.pr_sctp_cnt++;
4869			}
4870			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4871			if (sp->sinfo_flags & SCTP_UNORDERED) {
4872				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4873			}
4874			if (stcb->asoc.idata_supported == 0) {
4875				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4876					strq->next_mid_ordered++;
4877				}
4878			} else {
4879				if (sp->sinfo_flags & SCTP_UNORDERED) {
4880					strq->next_mid_unordered++;
4881				} else {
4882					strq->next_mid_ordered++;
4883				}
4884			}
4885	oh_well:
4886			if (sp->data) {
4887				/*
4888				 * Pull any data to free up the SB and allow
4889				 * the sender to "add more" while we throw
4890				 * this away :-)
4891				 */
4892				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4893				ret_sz += sp->length;
4894				do_wakeup_routine = 1;
4895				sp->some_taken = 1;
4896				sctp_m_freem(sp->data);
4897				sp->data = NULL;
4898				sp->tail_mbuf = NULL;
4899				sp->length = 0;
4900			}
4901		}
4902		SCTP_TCB_SEND_UNLOCK(stcb);
4903	}
4904	if (do_wakeup_routine) {
4905#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4906		struct socket *so;
4907
4908		so = SCTP_INP_SO(stcb->sctp_ep);
4909		if (!so_locked) {
4910			atomic_add_int(&stcb->asoc.refcnt, 1);
4911			SCTP_TCB_UNLOCK(stcb);
4912			SCTP_SOCKET_LOCK(so, 1);
4913			SCTP_TCB_LOCK(stcb);
4914			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4915			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4916				/* assoc was freed while we were unlocked */
4917				SCTP_SOCKET_UNLOCK(so, 1);
4918				return (ret_sz);
4919			}
4920		}
4921#endif
4922		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4923#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4924		if (!so_locked) {
4925			SCTP_SOCKET_UNLOCK(so, 1);
4926		}
4927#endif
4928	}
4929	return (ret_sz);
4930}
4931
4932/*
4933 * Checks to see if the given address is one that is currently known by
4934 * the kernel.  Note: can't distinguish the same address on multiple
4935 * interfaces and doesn't handle multiple addresses with different
4936 * zone/scope IDs.  Note: ifa_ifwithaddr() compares the entire sockaddr struct.
4937 */
4938struct sctp_ifa *
4939sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4940    int holds_lock)
4941{
4942	struct sctp_laddr *laddr;
4943
4944	if (holds_lock == 0) {
4945		SCTP_INP_RLOCK(inp);
4946	}
4947	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4948		if (laddr->ifa == NULL)
4949			continue;
4950		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4951			continue;
4952#ifdef INET
4953		if (addr->sa_family == AF_INET) {
4954			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4955			    laddr->ifa->address.sin.sin_addr.s_addr) {
4956				/* found him. */
4957				if (holds_lock == 0) {
4958					SCTP_INP_RUNLOCK(inp);
4959				}
4960				return (laddr->ifa);
4962			}
4963		}
4964#endif
4965#ifdef INET6
4966		if (addr->sa_family == AF_INET6) {
4967			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4968			    &laddr->ifa->address.sin6)) {
4969				/* found him. */
4970				if (holds_lock == 0) {
4971					SCTP_INP_RUNLOCK(inp);
4972				}
4973				return (laddr->ifa);
4975			}
4976		}
4977#endif
4978	}
4979	if (holds_lock == 0) {
4980		SCTP_INP_RUNLOCK(inp);
4981	}
4982	return (NULL);
4983}
4984
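/*
 * Compute the address-hash bucket value for an IPv4 or IPv6 address;
 * unknown address families hash to 0.
 */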
4985uint32_t
4986sctp_get_ifa_hash_val(struct sockaddr *addr)
4987{
4988	switch (addr->sa_family) {
4989#ifdef INET
4990	case AF_INET:
4991		{
4992			struct sockaddr_in *sin;
4993
4994			sin = (struct sockaddr_in *)addr;
4995			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4996		}
4997#endif
4998#ifdef INET6
4999	case AF_INET6:
5000		{
5001			struct sockaddr_in6 *sin6;
5002			uint32_t hash_of_addr;
5003
5004			sin6 = (struct sockaddr_in6 *)addr;
5005			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5006			    sin6->sin6_addr.s6_addr32[1] +
5007			    sin6->sin6_addr.s6_addr32[2] +
5008			    sin6->sin6_addr.s6_addr32[3]);
5009			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5010			return (hash_of_addr);
5011		}
5012#endif
5013	default:
5014		break;
5015	}
5016	return (0);
5017}
5018
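/*
 * Look up an address in the per-VRF address hash and return the
 * matching sctp_ifa, or NULL if the VRF or the address is not known.
 * The address read lock is taken unless the caller already holds it.
 */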
5019struct sctp_ifa *
5020sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5021{
5022	struct sctp_ifa *sctp_ifap;
5023	struct sctp_vrf *vrf;
5024	struct sctp_ifalist *hash_head;
5025	uint32_t hash_of_addr;
5026
5027	if (holds_lock == 0)
5028		SCTP_IPI_ADDR_RLOCK();
5029
5030	vrf = sctp_find_vrf(vrf_id);
5031	if (vrf == NULL) {
5032		if (holds_lock == 0)
5033			SCTP_IPI_ADDR_RUNLOCK();
5034		return (NULL);
5035	}
5036	hash_of_addr = sctp_get_ifa_hash_val(addr);
5037
5038	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5039	if (hash_head == NULL) {
5040		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5041		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5042		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5043		sctp_print_address(addr);
5044		SCTP_PRINTF("No such bucket for address\n");
5045		if (holds_lock == 0)
5046			SCTP_IPI_ADDR_RUNLOCK();
5047
5048		return (NULL);
5049	}
5050	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5051		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5052			continue;
5053#ifdef INET
5054		if (addr->sa_family == AF_INET) {
5055			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5056			    sctp_ifap->address.sin.sin_addr.s_addr) {
5057				/* found him. */
5058				if (holds_lock == 0)
5059					SCTP_IPI_ADDR_RUNLOCK();
5060				return (sctp_ifap);
5062			}
5063		}
5064#endif
5065#ifdef INET6
5066		if (addr->sa_family == AF_INET6) {
5067			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5068			    &sctp_ifap->address.sin6)) {
5069				/* found him. */
5070				if (holds_lock == 0)
5071					SCTP_IPI_ADDR_RUNLOCK();
5072				return (sctp_ifap);
5074			}
5075		}
5076#endif
5077	}
5078	if (holds_lock == 0)
5079		SCTP_IPI_ADDR_RUNLOCK();
5080	return (NULL);
5081}
5082
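/*
 * Called as the user consumes data from the receive buffer: if enough
 * window has opened up since the last report (at least rwnd_req bytes),
 * send a window-update SACK right away; otherwise just remember how
 * much has been freed for the next check.
 */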
5083static void
5084sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5085    uint32_t rwnd_req)
5086{
5087	/* User pulled some data, do we need a rwnd update? */
5088	int r_unlocked = 0;
5089	uint32_t dif, rwnd;
5090	struct socket *so = NULL;
5091
5092	if (stcb == NULL)
5093		return;
5094
5095	atomic_add_int(&stcb->asoc.refcnt, 1);
5096
5097	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5098	    SCTP_STATE_SHUTDOWN_RECEIVED |
5099	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5100		/* Pre-check: if we are freeing, no update */
5101		goto no_lock;
5102	}
5103	SCTP_INP_INCR_REF(stcb->sctp_ep);
5104	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5105	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5106		goto out;
5107	}
5108	so = stcb->sctp_socket;
5109	if (so == NULL) {
5110		goto out;
5111	}
5112	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5113	/* Have you freed enough to look? */
5114	*freed_so_far = 0;
5115	/* Yep, it's worth a look and the lock overhead */
5116
5117	/* Figure out what the rwnd would be */
5118	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5119	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5120		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5121	} else {
5122		dif = 0;
5123	}
5124	if (dif >= rwnd_req) {
5125		if (hold_rlock) {
5126			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5127			r_unlocked = 1;
5128		}
5129		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5130			/*
5131			 * One last check before we possibly allow the guy to
5132			 * get in. There is a race where the guy has not reached
5133			 * the gate; in that case, bail without reporting.
5134			 */
5135			goto out;
5136		}
5137		SCTP_TCB_LOCK(stcb);
5138		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5139			/* No reports here */
5140			SCTP_TCB_UNLOCK(stcb);
5141			goto out;
5142		}
5143		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5144		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5145
5146		sctp_chunk_output(stcb->sctp_ep, stcb,
5147		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5148		/* make sure no timer is running */
5149		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5150		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5151		SCTP_TCB_UNLOCK(stcb);
5152	} else {
5153		/* Update how much we have pending */
5154		stcb->freed_by_sorcv_sincelast = dif;
5155	}
5156out:
5157	if (so && r_unlocked && hold_rlock) {
5158		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5159	}
5160	SCTP_INP_DECR_REF(stcb->sctp_ep);
5161no_lock:
5162	atomic_add_int(&stcb->asoc.refcnt, -1);
5163	return;
5164}
5165
5166int
5167sctp_sorecvmsg(struct socket *so,
5168    struct uio *uio,
5169    struct mbuf **mp,
5170    struct sockaddr *from,
5171    int fromlen,
5172    int *msg_flags,
5173    struct sctp_sndrcvinfo *sinfo,
5174    int filling_sinfo)
5175{
5176	/*
5177	 * MSG flags we will look at:
5178	 * MSG_DONTWAIT - non-blocking IO.
5179	 * MSG_PEEK - Look, don't touch :-D (only valid with OUT mbuf copy,
5180	 *     i.e. mp=NULL, thus uio is the copy method to userland).
5181	 * MSG_WAITALL - ??
5182	 * On the way out we may send any combination of MSG_NOTIFICATION and MSG_EOR.
5183	 */
5184	struct sctp_inpcb *inp = NULL;
5185	int my_len = 0;
5186	int cp_len = 0, error = 0;
5187	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5188	struct mbuf *m = NULL;
5189	struct sctp_tcb *stcb = NULL;
5190	int wakeup_read_socket = 0;
5191	int freecnt_applied = 0;
5192	int out_flags = 0, in_flags = 0;
5193	int block_allowed = 1;
5194	uint32_t freed_so_far = 0;
5195	uint32_t copied_so_far = 0;
5196	int in_eeor_mode = 0;
5197	int no_rcv_needed = 0;
5198	uint32_t rwnd_req = 0;
5199	int hold_sblock = 0;
5200	int hold_rlock = 0;
5201	ssize_t slen = 0;
5202	uint32_t held_length = 0;
5203	int sockbuf_lock = 0;
5204
5205	if (uio == NULL) {
5206		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5207		return (EINVAL);
5208	}
5209	if (msg_flags) {
5210		in_flags = *msg_flags;
5211		if (in_flags & MSG_PEEK)
5212			SCTP_STAT_INCR(sctps_read_peeks);
5213	} else {
5214		in_flags = 0;
5215	}
5216	slen = uio->uio_resid;
5217
5218	/* Pull in and set up our int flags */
5219	if (in_flags & MSG_OOB) {
5220		/* Out of band's NOT supported */
5221		return (EOPNOTSUPP);
5222	}
5223	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5224		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5225		return (EINVAL);
5226	}
5227	if ((in_flags & (MSG_DONTWAIT
5228	    | MSG_NBIO
5229	    )) ||
5230	    SCTP_SO_IS_NBIO(so)) {
5231		block_allowed = 0;
5232	}
5233	/* setup the endpoint */
5234	inp = (struct sctp_inpcb *)so->so_pcb;
5235	if (inp == NULL) {
5236		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5237		return (EFAULT);
5238	}
5239	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5240	/* Must be at least a MTU's worth */
5241	if (rwnd_req < SCTP_MIN_RWND)
5242		rwnd_req = SCTP_MIN_RWND;
5243	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5244	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5245		sctp_misc_ints(SCTP_SORECV_ENTER,
5246		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5247	}
5248	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5249		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5250		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5251	}
5252	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5253	if (error) {
5254		goto release_unlocked;
5255	}
5256	sockbuf_lock = 1;
5257restart:
5258
5259
5260restart_nosblocks:
5261	if (hold_sblock == 0) {
5262		SOCKBUF_LOCK(&so->so_rcv);
5263		hold_sblock = 1;
5264	}
5265	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5266	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5267		goto out;
5268	}
5269	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5270		if (so->so_error) {
5271			error = so->so_error;
5272			if ((in_flags & MSG_PEEK) == 0)
5273				so->so_error = 0;
5274			goto out;
5275		} else {
5276			if (so->so_rcv.sb_cc == 0) {
5277				/* indicate EOF */
5278				error = 0;
5279				goto out;
5280			}
5281		}
5282	}
5283	if (so->so_rcv.sb_cc <= held_length) {
5284		if (so->so_error) {
5285			error = so->so_error;
5286			if ((in_flags & MSG_PEEK) == 0) {
5287				so->so_error = 0;
5288			}
5289			goto out;
5290		}
5291		if ((so->so_rcv.sb_cc == 0) &&
5292		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5293		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5294			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5295				/*
5296				 * For the active open side, clear flags for
5297				 * re-use; the passive open side is blocked by
5298				 * connect.
5299				 */
5300				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5301					/*
5302					 * You were aborted, passive side
5303					 * always hits here
5304					 */
5305					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5306					error = ECONNRESET;
5307				}
5308				so->so_state &= ~(SS_ISCONNECTING |
5309				    SS_ISDISCONNECTING |
5310				    SS_ISCONFIRMING |
5311				    SS_ISCONNECTED);
5312				if (error == 0) {
5313					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5314						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5315						error = ENOTCONN;
5316					}
5317				}
5318				goto out;
5319			}
5320		}
5321		if (block_allowed) {
5322			error = sbwait(&so->so_rcv);
5323			if (error) {
5324				goto out;
5325			}
5326			held_length = 0;
5327			goto restart_nosblocks;
5328		} else {
5329			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5330			error = EWOULDBLOCK;
5331			goto out;
5332		}
5333	}
5334	if (hold_sblock == 1) {
5335		SOCKBUF_UNLOCK(&so->so_rcv);
5336		hold_sblock = 0;
5337	}
5338	/* we possibly have data we can read */
5339	/* sa_ignore FREED_MEMORY */
5340	control = TAILQ_FIRST(&inp->read_queue);
5341	if (control == NULL) {
5342		/*
5343		 * This could be happening since the appender did the
5344		 * increment but as not yet did the tailq insert onto the
5345		 * increment but has not yet done the tailq insert onto the
5346		 */
5347		if (hold_rlock == 0) {
5348			SCTP_INP_READ_LOCK(inp);
5349		}
5350		control = TAILQ_FIRST(&inp->read_queue);
5351		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5352#ifdef INVARIANTS
5353			panic("Huh, its non zero and nothing on control?");
5354#endif
5355			so->so_rcv.sb_cc = 0;
5356		}
5357		SCTP_INP_READ_UNLOCK(inp);
5358		hold_rlock = 0;
5359		goto restart;
5360	}
5361	if ((control->length == 0) &&
5362	    (control->do_not_ref_stcb)) {
5363		/*
5364		 * Clean-up code for an assoc being freed that left behind a
5365		 * pdapi.. maybe a peer in EEOR mode that just closed after
5366		 * sending and never indicated an EOR.
5367		 */
5368		if (hold_rlock == 0) {
5369			hold_rlock = 1;
5370			SCTP_INP_READ_LOCK(inp);
5371		}
5372		control->held_length = 0;
5373		if (control->data) {
5374			/* Hmm there is data here .. fix */
5375			struct mbuf *m_tmp;
5376			int cnt = 0;
5377
5378			m_tmp = control->data;
5379			while (m_tmp) {
5380				cnt += SCTP_BUF_LEN(m_tmp);
5381				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5382					control->tail_mbuf = m_tmp;
5383					control->end_added = 1;
5384				}
5385				m_tmp = SCTP_BUF_NEXT(m_tmp);
5386			}
5387			control->length = cnt;
5388		} else {
5389			/* remove it */
5390			TAILQ_REMOVE(&inp->read_queue, control, next);
5391			/* Add back any hidden data */
5392			sctp_free_remote_addr(control->whoFrom);
5393			sctp_free_a_readq(stcb, control);
5394		}
5395		if (hold_rlock) {
5396			hold_rlock = 0;
5397			SCTP_INP_READ_UNLOCK(inp);
5398		}
5399		goto restart;
5400	}
5401	if ((control->length == 0) &&
5402	    (control->end_added == 1)) {
5403		/*
5404		 * Do we also need to check for (control->pdapi_aborted ==
5405		 * 1)?
5406		 */
5407		if (hold_rlock == 0) {
5408			hold_rlock = 1;
5409			SCTP_INP_READ_LOCK(inp);
5410		}
5411		TAILQ_REMOVE(&inp->read_queue, control, next);
5412		if (control->data) {
5413#ifdef INVARIANTS
5414			panic("control->data not null but control->length == 0");
5415#else
5416			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5417			sctp_m_freem(control->data);
5418			control->data = NULL;
5419#endif
5420		}
5421		if (control->aux_data) {
5422			sctp_m_free(control->aux_data);
5423			control->aux_data = NULL;
5424		}
5425#ifdef INVARIANTS
5426		if (control->on_strm_q) {
5427			panic("About to free ctl:%p so:%p and its in %d",
5428			    control, so, control->on_strm_q);
5429		}
5430#endif
5431		sctp_free_remote_addr(control->whoFrom);
5432		sctp_free_a_readq(stcb, control);
5433		if (hold_rlock) {
5434			hold_rlock = 0;
5435			SCTP_INP_READ_UNLOCK(inp);
5436		}
5437		goto restart;
5438	}
5439	if (control->length == 0) {
5440		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5441		    (filling_sinfo)) {
5442			/* find a more suitable one than this */
5443			ctl = TAILQ_NEXT(control, next);
5444			while (ctl) {
5445				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5446				    (ctl->some_taken ||
5447				    (ctl->spec_flags & M_NOTIFICATION) ||
5448				    ((ctl->do_not_ref_stcb == 0) &&
5449				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5450				    ) {
5451					/*-
5452					 * If the next control is for a different TCB and has data
5453					 * present, and we have either already taken some (pdapi), OR we can
5454					 * ref the tcb and no delivery has started on this stream, we
5455					 * take it. Note we allow a notification on a different
5456					 * assoc to be delivered.
5457					 */
5458					control = ctl;
5459					goto found_one;
5460				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5461					    (ctl->length) &&
5462					    ((ctl->some_taken) ||
5463					    ((ctl->do_not_ref_stcb == 0) &&
5464					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5465				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5466					/*-
5467					 * If the next control is for the same tcb, has data present, and
5468					 * the stream interleave feature is on, then if we have
5469					 * taken some (pdapi) or we can refer to that tcb AND we have
5470					 * not started a delivery for this stream, we can take it.
5471					 * Note we do NOT allow a notification on the same assoc to
5472					 * be delivered.
5473					 */
5474					control = ctl;
5475					goto found_one;
5476				}
5477				ctl = TAILQ_NEXT(ctl, next);
5478			}
5479		}
5480		/*
5481		 * If we reach here, no suitable replacement is available
5482		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5483		 * into our held count, and it's time to sleep again.
5484		 */
5485		held_length = so->so_rcv.sb_cc;
5486		control->held_length = so->so_rcv.sb_cc;
5487		goto restart;
5488	}
5489	/* Clear the held length since there is something to read */
5490	control->held_length = 0;
5491found_one:
5492	/*
5493	 * If we reach here, control has some data for us to read off.
5494	 * Note that stcb COULD be NULL.
5495	 */
5496	if (hold_rlock == 0) {
5497		hold_rlock = 1;
5498		SCTP_INP_READ_LOCK(inp);
5499	}
5500	control->some_taken++;
5501	stcb = control->stcb;
5502	if (stcb) {
5503		if ((control->do_not_ref_stcb == 0) &&
5504		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5505			if (freecnt_applied == 0)
5506				stcb = NULL;
5507		} else if (control->do_not_ref_stcb == 0) {
5508			/* you can't free it on me please */
5509			/*
5510			 * The lock on the socket buffer protects us so the
5511			 * free code will stop. But since we used the
5512			 * socketbuf lock and the sender uses the tcb_lock
5513			 * to increment, we need to use the atomic add to
5514			 * the refcnt
5515			 */
5516			if (freecnt_applied) {
5517#ifdef INVARIANTS
5518				panic("refcnt already incremented");
5519#else
5520				SCTP_PRINTF("refcnt already incremented?\n");
5521#endif
5522			} else {
5523				atomic_add_int(&stcb->asoc.refcnt, 1);
5524				freecnt_applied = 1;
5525			}
5526			/*
5527			 * Set up to remember how much we have not yet told
5528			 * the peer our rwnd has opened up. Note we grab the
5529			 * value from the tcb from last time. Note too that
5530			 * sack sending clears this when a sack is sent,
5531			 * which is fine. Once we hit the rwnd_req, we will
5532			 * then call sctp_user_rcvd(), which will not
5533			 * lock until it KNOWs it MUST send a WUP-SACK.
5534			 */
5535			freed_so_far = stcb->freed_by_sorcv_sincelast;
5536			stcb->freed_by_sorcv_sincelast = 0;
5537		}
5538	}
5539	if (stcb &&
5540	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5541	    control->do_not_ref_stcb == 0) {
5542		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5543	}
5544	/* First lets get off the sinfo and sockaddr info */
5545	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5546		sinfo->sinfo_stream = control->sinfo_stream;
5547		sinfo->sinfo_ssn = (uint16_t)control->mid;
5548		sinfo->sinfo_flags = control->sinfo_flags;
5549		sinfo->sinfo_ppid = control->sinfo_ppid;
5550		sinfo->sinfo_context = control->sinfo_context;
5551		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5552		sinfo->sinfo_tsn = control->sinfo_tsn;
5553		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5554		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5555		nxt = TAILQ_NEXT(control, next);
5556		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5557		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5558			struct sctp_extrcvinfo *s_extra;
5559
5560			s_extra = (struct sctp_extrcvinfo *)sinfo;
5561			if ((nxt) &&
5562			    (nxt->length)) {
5563				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5564				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5565					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5566				}
5567				if (nxt->spec_flags & M_NOTIFICATION) {
5568					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5569				}
5570				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5571				s_extra->serinfo_next_length = nxt->length;
5572				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5573				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5574				if (nxt->tail_mbuf != NULL) {
5575					if (nxt->end_added) {
5576						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5577					}
5578				}
5579			} else {
5580				/*
5581				 * we explicitly zero this, since the memcpy
5582				 * got some other things beyond the older
5583				 * sinfo_ fields that are on the control's
5584				 * structure :-D
5585				 */
5586				nxt = NULL;
5587				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5588				s_extra->serinfo_next_aid = 0;
5589				s_extra->serinfo_next_length = 0;
5590				s_extra->serinfo_next_ppid = 0;
5591				s_extra->serinfo_next_stream = 0;
5592			}
5593		}
5594		/*
5595		 * update off the real current cum-ack, if we have an stcb.
5596		 */
5597		if ((control->do_not_ref_stcb == 0) && stcb)
5598			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5599		/*
5600		 * mask off the high bits, we keep the actual chunk bits in
5601		 * there.
5602		 */
5603		sinfo->sinfo_flags &= 0x00ff;
5604		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5605			sinfo->sinfo_flags |= SCTP_UNORDERED;
5606		}
5607	}
5608#ifdef SCTP_ASOCLOG_OF_TSNS
5609	{
5610		int index, newindex;
5611		struct sctp_pcbtsn_rlog *entry;
5612
5613		do {
5614			index = inp->readlog_index;
5615			newindex = index + 1;
5616			if (newindex >= SCTP_READ_LOG_SIZE) {
5617				newindex = 0;
5618			}
5619		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5620		entry = &inp->readlog[index];
5621		entry->vtag = control->sinfo_assoc_id;
5622		entry->strm = control->sinfo_stream;
5623		entry->seq = (uint16_t)control->mid;
5624		entry->sz = control->length;
5625		entry->flgs = control->sinfo_flags;
5626	}
5627#endif
5628	if ((fromlen > 0) && (from != NULL)) {
5629		union sctp_sockstore store;
5630		size_t len;
5631
5632		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5633#ifdef INET6
5634		case AF_INET6:
5635			len = sizeof(struct sockaddr_in6);
5636			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5637			store.sin6.sin6_port = control->port_from;
5638			break;
5639#endif
5640#ifdef INET
5641		case AF_INET:
5642#ifdef INET6
5643			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5644				len = sizeof(struct sockaddr_in6);
5645				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5646				    &store.sin6);
5647				store.sin6.sin6_port = control->port_from;
5648			} else {
5649				len = sizeof(struct sockaddr_in);
5650				store.sin = control->whoFrom->ro._l_addr.sin;
5651				store.sin.sin_port = control->port_from;
5652			}
5653#else
5654			len = sizeof(struct sockaddr_in);
5655			store.sin = control->whoFrom->ro._l_addr.sin;
5656			store.sin.sin_port = control->port_from;
5657#endif
5658			break;
5659#endif
5660		default:
5661			len = 0;
5662			break;
5663		}
5664		memcpy(from, &store, min((size_t)fromlen, len));
5665#ifdef INET6
5666		{
5667			struct sockaddr_in6 lsa6, *from6;
5668
5669			from6 = (struct sockaddr_in6 *)from;
5670			sctp_recover_scope_mac(from6, (&lsa6));
5671		}
5672#endif
5673	}
5674	if (hold_rlock) {
5675		SCTP_INP_READ_UNLOCK(inp);
5676		hold_rlock = 0;
5677	}
5678	if (hold_sblock) {
5679		SOCKBUF_UNLOCK(&so->so_rcv);
5680		hold_sblock = 0;
5681	}
5682	/* now copy out what data we can */
5683	if (mp == NULL) {
5684		/* copy out each mbuf in the chain up to length */
5685get_more_data:
5686		m = control->data;
5687		while (m) {
5688			/* Move out all we can */
5689			cp_len = (int)uio->uio_resid;
5690			my_len = (int)SCTP_BUF_LEN(m);
5691			if (cp_len > my_len) {
5692				/* not enough in this buf */
5693				cp_len = my_len;
5694			}
5695			if (hold_rlock) {
5696				SCTP_INP_READ_UNLOCK(inp);
5697				hold_rlock = 0;
5698			}
5699			if (cp_len > 0)
5700				error = uiomove(mtod(m, char *), cp_len, uio);
5701			/* re-read */
5702			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5703				goto release;
5704			}
5705			if ((control->do_not_ref_stcb == 0) && stcb &&
5706			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5707				no_rcv_needed = 1;
5708			}
5709			if (error) {
5710				/* error we are out of here */
5711				goto release;
5712			}
5713			SCTP_INP_READ_LOCK(inp);
5714			hold_rlock = 1;
5715			if (cp_len == SCTP_BUF_LEN(m)) {
5716				if ((SCTP_BUF_NEXT(m) == NULL) &&
5717				    (control->end_added)) {
5718					out_flags |= MSG_EOR;
5719					if ((control->do_not_ref_stcb == 0) &&
5720					    (control->stcb != NULL) &&
5721					    ((control->spec_flags & M_NOTIFICATION) == 0))
5722						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5723				}
5724				if (control->spec_flags & M_NOTIFICATION) {
5725					out_flags |= MSG_NOTIFICATION;
5726				}
5727				/* we ate up the mbuf */
5728				if (in_flags & MSG_PEEK) {
5729					/* just looking */
5730					m = SCTP_BUF_NEXT(m);
5731					copied_so_far += cp_len;
5732				} else {
5733					/* dispose of the mbuf */
5734					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5735						sctp_sblog(&so->so_rcv,
5736						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5737					}
5738					sctp_sbfree(control, stcb, &so->so_rcv, m);
5739					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5740						sctp_sblog(&so->so_rcv,
5741						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5742					}
5743					copied_so_far += cp_len;
5744					freed_so_far += cp_len;
5745					freed_so_far += MSIZE;
5746					atomic_subtract_int(&control->length, cp_len);
5747					control->data = sctp_m_free(m);
5748					m = control->data;
5749					/*
5750					 * been through it all; we must hold the sb
5751					 * lock, so it is ok to null the tail
5752					 */
5753					if (control->data == NULL) {
5754#ifdef INVARIANTS
5755						if ((control->end_added == 0) ||
5756						    (TAILQ_NEXT(control, next) == NULL)) {
5757							/*
5758							 * If the end is not
5759							 * added, OR the
5760							 * next is NOT null
5761							 * we MUST have the
5762							 * lock.
5763							 */
5764							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5765								panic("Hmm we don't own the lock?");
5766							}
5767						}
5768#endif
5769						control->tail_mbuf = NULL;
5770#ifdef INVARIANTS
5771						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5772							panic("end_added, nothing left and no MSG_EOR");
5773						}
5774#endif
5775					}
5776				}
5777			} else {
5778				/* Do we need to trim the mbuf? */
5779				if (control->spec_flags & M_NOTIFICATION) {
5780					out_flags |= MSG_NOTIFICATION;
5781				}
5782				if ((in_flags & MSG_PEEK) == 0) {
5783					SCTP_BUF_RESV_UF(m, cp_len);
5784					SCTP_BUF_LEN(m) -= cp_len;
5785					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5786						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5787					}
5788					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5789					if ((control->do_not_ref_stcb == 0) &&
5790					    stcb) {
5791						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5792					}
5793					copied_so_far += cp_len;
5794					freed_so_far += cp_len;
5795					freed_so_far += MSIZE;
5796					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5797						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5798						    SCTP_LOG_SBRESULT, 0);
5799					}
5800					atomic_subtract_int(&control->length, cp_len);
5801				} else {
5802					copied_so_far += cp_len;
5803				}
5804			}
5805			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5806				break;
5807			}
5808			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5809			    (control->do_not_ref_stcb == 0) &&
5810			    (freed_so_far >= rwnd_req)) {
5811				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5812			}
5813		}		/* end while(m) */
5814		/*
5815		 * At this point we have looked at it all and we either have
5816		 * a MSG_EOR, or we have read all the user wants... <OR>
5817		 * control->length == 0.
5818		 */
5819		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5820			/* we are done with this control */
5821			if (control->length == 0) {
5822				if (control->data) {
5823#ifdef INVARIANTS
5824					panic("control->data not null at read eor?");
5825#else
5826					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5827					sctp_m_freem(control->data);
5828					control->data = NULL;
5829#endif
5830				}
5831		done_with_control:
5832				if (hold_rlock == 0) {
5833					SCTP_INP_READ_LOCK(inp);
5834					hold_rlock = 1;
5835				}
5836				TAILQ_REMOVE(&inp->read_queue, control, next);
5837				/* Add back any hidden data */
5838				if (control->held_length) {
5839					held_length = 0;
5840					control->held_length = 0;
5841					wakeup_read_socket = 1;
5842				}
5843				if (control->aux_data) {
5844					sctp_m_free(control->aux_data);
5845					control->aux_data = NULL;
5846				}
5847				no_rcv_needed = control->do_not_ref_stcb;
5848				sctp_free_remote_addr(control->whoFrom);
5849				control->data = NULL;
5850#ifdef INVARIANTS
5851				if (control->on_strm_q) {
5852					panic("About to free ctl:%p so:%p and its in %d",
5853					    control, so, control->on_strm_q);
5854				}
5855#endif
5856				sctp_free_a_readq(stcb, control);
5857				control = NULL;
5858				if ((freed_so_far >= rwnd_req) &&
5859				    (no_rcv_needed == 0))
5860					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5861
5862			} else {
5863				/*
5864				 * The user did not read all of this
5865				 * message, turn off the returned MSG_EOR
5866				 * since we are leaving more behind on the
5867				 * control to read.
5868				 */
5869#ifdef INVARIANTS
5870				if (control->end_added &&
5871				    (control->data == NULL) &&
5872				    (control->tail_mbuf == NULL)) {
5873					panic("Gak, control->length is corrupt?");
5874				}
5875#endif
5876				no_rcv_needed = control->do_not_ref_stcb;
5877				out_flags &= ~MSG_EOR;
5878			}
5879		}
5880		if (out_flags & MSG_EOR) {
5881			goto release;
5882		}
5883		if ((uio->uio_resid == 0) ||
5884		    ((in_eeor_mode) &&
5885		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5886			goto release;
5887		}
5888		/*
5889		 * If we hit here the receiver wants more and this message is
5890		 * NOT done (pd-api). So two questions: can we block? If not,
5891		 * we are done. Did the user NOT set MSG_WAITALL?
5892		 */
5893		if (block_allowed == 0) {
5894			goto release;
5895		}
5896		/*
5897		 * We need to wait for more data. A few things: - We don't
5898		 * sbunlock() so we don't get someone else reading. - We
5899		 * must be sure to account for the case where what is added
5900		 * is NOT for our control when we wake up.
5901		 */
5902
5903		/*
5904		 * Do we need to tell the transport a rwnd update might be
5905		 * needed before we go to sleep?
5906		 */
5907		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5908		    ((freed_so_far >= rwnd_req) &&
5909		    (control->do_not_ref_stcb == 0) &&
5910		    (no_rcv_needed == 0))) {
5911			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5912		}
5913wait_some_more:
5914		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5915			goto release;
5916		}
5917		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5918			goto release;
5919
5920		if (hold_rlock == 1) {
5921			SCTP_INP_READ_UNLOCK(inp);
5922			hold_rlock = 0;
5923		}
5924		if (hold_sblock == 0) {
5925			SOCKBUF_LOCK(&so->so_rcv);
5926			hold_sblock = 1;
5927		}
5928		if ((copied_so_far) && (control->length == 0) &&
5929		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5930			goto release;
5931		}
5932		if (so->so_rcv.sb_cc <= control->held_length) {
5933			error = sbwait(&so->so_rcv);
5934			if (error) {
5935				goto release;
5936			}
5937			control->held_length = 0;
5938		}
5939		if (hold_sblock) {
5940			SOCKBUF_UNLOCK(&so->so_rcv);
5941			hold_sblock = 0;
5942		}
5943		if (control->length == 0) {
5944			/* still nothing here */
5945			if (control->end_added == 1) {
5946			/* he aborted, or is done, i.e. did a shutdown */
5947				out_flags |= MSG_EOR;
5948				if (control->pdapi_aborted) {
5949					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5950						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5951
5952					out_flags |= MSG_TRUNC;
5953				} else {
5954					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5955						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5956				}
5957				goto done_with_control;
5958			}
5959			if (so->so_rcv.sb_cc > held_length) {
5960				control->held_length = so->so_rcv.sb_cc;
5961				held_length = 0;
5962			}
5963			goto wait_some_more;
5964		} else if (control->data == NULL) {
5965			/*
5966			 * we must re-sync since data is probably being
5967			 * added
5968			 */
5969			SCTP_INP_READ_LOCK(inp);
5970			if ((control->length > 0) && (control->data == NULL)) {
5971				/*
5972				 * big trouble.. we have the lock and it's
5973				 * corrupt?
5974				 */
5975#ifdef INVARIANTS
5976				panic("Impossible data==NULL length !=0");
5977#endif
5978				out_flags |= MSG_EOR;
5979				out_flags |= MSG_TRUNC;
5980				control->length = 0;
5981				SCTP_INP_READ_UNLOCK(inp);
5982				goto done_with_control;
5983			}
5984			SCTP_INP_READ_UNLOCK(inp);
5985			/* We will fall around to get more data */
5986		}
5987		goto get_more_data;
5988	} else {
5989		/*-
5990		 * Give caller back the mbuf chain,
5991		 * store in uio_resid the length
5992		 */
5993		wakeup_read_socket = 0;
5994		if ((control->end_added == 0) ||
5995		    (TAILQ_NEXT(control, next) == NULL)) {
5996			/* Need to get rlock */
5997			if (hold_rlock == 0) {
5998				SCTP_INP_READ_LOCK(inp);
5999				hold_rlock = 1;
6000			}
6001		}
6002		if (control->end_added) {
6003			out_flags |= MSG_EOR;
6004			if ((control->do_not_ref_stcb == 0) &&
6005			    (control->stcb != NULL) &&
6006			    ((control->spec_flags & M_NOTIFICATION) == 0))
6007				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6008		}
6009		if (control->spec_flags & M_NOTIFICATION) {
6010			out_flags |= MSG_NOTIFICATION;
6011		}
6012		uio->uio_resid = control->length;
6013		*mp = control->data;
6014		m = control->data;
6015		while (m) {
6016			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6017				sctp_sblog(&so->so_rcv,
6018				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6019			}
6020			sctp_sbfree(control, stcb, &so->so_rcv, m);
6021			freed_so_far += SCTP_BUF_LEN(m);
6022			freed_so_far += MSIZE;
6023			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6024				sctp_sblog(&so->so_rcv,
6025				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6026			}
6027			m = SCTP_BUF_NEXT(m);
6028		}
6029		control->data = control->tail_mbuf = NULL;
6030		control->length = 0;
6031		if (out_flags & MSG_EOR) {
6032			/* Done with this control */
6033			goto done_with_control;
6034		}
6035	}
6036release:
6037	if (hold_rlock == 1) {
6038		SCTP_INP_READ_UNLOCK(inp);
6039		hold_rlock = 0;
6040	}
6041	if (hold_sblock == 1) {
6042		SOCKBUF_UNLOCK(&so->so_rcv);
6043		hold_sblock = 0;
6044	}
6045	sbunlock(&so->so_rcv);
6046	sockbuf_lock = 0;
6047
6048release_unlocked:
6049	if (hold_sblock) {
6050		SOCKBUF_UNLOCK(&so->so_rcv);
6051		hold_sblock = 0;
6052	}
6053	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6054		if ((freed_so_far >= rwnd_req) &&
6055		    (control && (control->do_not_ref_stcb == 0)) &&
6056		    (no_rcv_needed == 0))
6057			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6058	}
6059out:
6060	if (msg_flags) {
6061		*msg_flags = out_flags;
6062	}
6063	if (((out_flags & MSG_EOR) == 0) &&
6064	    ((in_flags & MSG_PEEK) == 0) &&
6065	    (sinfo) &&
6066	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6067	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6068		struct sctp_extrcvinfo *s_extra;
6069
6070		s_extra = (struct sctp_extrcvinfo *)sinfo;
6071		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6072	}
6073	if (hold_rlock == 1) {
6074		SCTP_INP_READ_UNLOCK(inp);
6075	}
6076	if (hold_sblock) {
6077		SOCKBUF_UNLOCK(&so->so_rcv);
6078	}
6079	if (sockbuf_lock) {
6080		sbunlock(&so->so_rcv);
6081	}
6082	if (freecnt_applied) {
6083		/*
6084		 * The lock on the socket buffer protects us so the free
6085		 * code will stop. But since we used the socketbuf lock and
6086		 * the sender uses the tcb_lock to increment, we need to use
6087		 * the atomic add to the refcnt.
6088		 */
6089		if (stcb == NULL) {
6090#ifdef INVARIANTS
6091			panic("stcb for refcnt has gone NULL?");
6092			goto stage_left;
6093#else
6094			goto stage_left;
6095#endif
6096		}
6097		/* Save the value back for next time */
6098		stcb->freed_by_sorcv_sincelast = freed_so_far;
6099		atomic_add_int(&stcb->asoc.refcnt, -1);
6100	}
6101	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6102		if (stcb) {
6103			sctp_misc_ints(SCTP_SORECV_DONE,
6104			    freed_so_far,
6105			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6106			    stcb->asoc.my_rwnd,
6107			    so->so_rcv.sb_cc);
6108		} else {
6109			sctp_misc_ints(SCTP_SORECV_DONE,
6110			    freed_so_far,
6111			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6112			    0,
6113			    so->so_rcv.sb_cc);
6114		}
6115	}
6116stage_left:
6117	if (wakeup_read_socket) {
6118		sctp_sorwakeup(inp, so);
6119	}
6120	return (error);
6121}
6122
6123
6124#ifdef SCTP_MBUF_LOGGING
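/*
 * Logging wrappers around m_free()/m_freem(): when mbuf logging is
 * enabled, record each mbuf before it is released.
 */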
6125struct mbuf *
6126sctp_m_free(struct mbuf *m)
6127{
6128	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6129		sctp_log_mb(m, SCTP_MBUF_IFREE);
6130	}
6131	return (m_free(m));
6132}
6133
6134void
6135sctp_m_freem(struct mbuf *mb)
6136{
6137	while (mb != NULL)
6138		mb = sctp_m_free(mb);
6139}
6140
6141#endif
6142
6143int
6144sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6145{
6146	/*
6147	 * Given a local address, for all associations that hold the
6148	 * address, request a peer set-primary.
6149	 */
6150	struct sctp_ifa *ifa;
6151	struct sctp_laddr *wi;
6152
6153	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6154	if (ifa == NULL) {
6155		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6156		return (EADDRNOTAVAIL);
6157	}
6158	/*
6159	 * Now that we have the ifa we must awaken the iterator with this
6160	 * message.
6161	 */
6162	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6163	if (wi == NULL) {
6164		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6165		return (ENOMEM);
6166	}
6167	/* Now incr the count and init the wi structure */
6168	SCTP_INCR_LADDR_COUNT();
6169	memset(wi, 0, sizeof(*wi));
6170	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6171	wi->ifa = ifa;
6172	wi->action = SCTP_SET_PRIM_ADDR;
6173	atomic_add_int(&ifa->refcount, 1);
6174
6175	/* Now add it to the work queue */
6176	SCTP_WQ_ADDR_LOCK();
6177	/*
6178	 * Should this really be a tailq? As it is we will process the
6179	 * newest first :-0
6180	 */
6181	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6182	SCTP_WQ_ADDR_UNLOCK();
6183	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6184	    (struct sctp_inpcb *)NULL,
6185	    (struct sctp_tcb *)NULL,
6186	    (struct sctp_nets *)NULL);
6187	return (0);
6188}
6189
6190
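/*
 * Receive entry point used by the socket layer. Wraps sctp_sorecvmsg()
 * and, if requested, converts the returned sctp_sndrcvinfo into a
 * control mbuf and duplicates the peer address for the caller.
 */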
6191int
6192sctp_soreceive(struct socket *so,
6193    struct sockaddr **psa,
6194    struct uio *uio,
6195    struct mbuf **mp0,
6196    struct mbuf **controlp,
6197    int *flagsp)
6198{
6199	int error, fromlen;
6200	uint8_t sockbuf[256];
6201	struct sockaddr *from;
6202	struct sctp_extrcvinfo sinfo;
6203	int filling_sinfo = 1;
6204	struct sctp_inpcb *inp;
6205
6206	inp = (struct sctp_inpcb *)so->so_pcb;
6207	/* pickup the assoc we are reading from */
6208	if (inp == NULL) {
6209		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6210		return (EINVAL);
6211	}
6212	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6213	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6214	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6215	    (controlp == NULL)) {
6216		/* user does not want the sndrcv ctl */
6217		filling_sinfo = 0;
6218	}
6219	if (psa) {
6220		from = (struct sockaddr *)sockbuf;
6221		fromlen = sizeof(sockbuf);
6222		from->sa_len = 0;
6223	} else {
6224		from = NULL;
6225		fromlen = 0;
6226	}
6227
6228	if (filling_sinfo) {
6229		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6230	}
6231	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6232	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6233	if (controlp != NULL) {
6234		/* copy back the sinfo in a CMSG format */
6235		if (filling_sinfo)
6236			*controlp = sctp_build_ctl_nchunk(inp,
6237			    (struct sctp_sndrcvinfo *)&sinfo);
6238		else
6239			*controlp = NULL;
6240	}
6241	if (psa) {
6242		/* copy back the address info */
6243		if (from && from->sa_len) {
6244			*psa = sodupsockaddr(from, M_NOWAIT);
6245		} else {
6246			*psa = NULL;
6247		}
6248	}
6249	return (error);
6250}
6251
6252
6253
6254
6255
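/*
 * sctp_connectx() helper: walk the packed list of addresses and add each
 * one to the association as a confirmed remote address. On an invalid
 * address or allocation failure the association is freed and *error is
 * set. Returns the number of addresses added.
 */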
6256int
6257sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6258    int totaddr, int *error)
6259{
6260	int added = 0;
6261	int i;
6262	struct sctp_inpcb *inp;
6263	struct sockaddr *sa;
6264	size_t incr = 0;
6265#ifdef INET
6266	struct sockaddr_in *sin;
6267#endif
6268#ifdef INET6
6269	struct sockaddr_in6 *sin6;
6270#endif
6271
6272	sa = addr;
6273	inp = stcb->sctp_ep;
6274	*error = 0;
6275	for (i = 0; i < totaddr; i++) {
6276		switch (sa->sa_family) {
6277#ifdef INET
6278		case AF_INET:
6279			incr = sizeof(struct sockaddr_in);
6280			sin = (struct sockaddr_in *)sa;
6281			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6282			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6283			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6284				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6285				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6286				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6287				*error = EINVAL;
6288				goto out_now;
6289			}
6290			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6291			    SCTP_DONOT_SETSCOPE,
6292			    SCTP_ADDR_IS_CONFIRMED)) {
6293				/* assoc gone no un-lock */
6294				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6295				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6296				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6297				*error = ENOBUFS;
6298				goto out_now;
6299			}
6300			added++;
6301			break;
6302#endif
6303#ifdef INET6
6304		case AF_INET6:
6305			incr = sizeof(struct sockaddr_in6);
6306			sin6 = (struct sockaddr_in6 *)sa;
6307			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6308			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6309				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6310				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6311				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6312				*error = EINVAL;
6313				goto out_now;
6314			}
6315			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6316			    SCTP_DONOT_SETSCOPE,
6317			    SCTP_ADDR_IS_CONFIRMED)) {
6318				/* assoc gone no un-lock */
6319				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6320				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6321				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6322				*error = ENOBUFS;
6323				goto out_now;
6324			}
6325			added++;
6326			break;
6327#endif
6328		default:
6329			break;
6330		}
6331		sa = (struct sockaddr *)((caddr_t)sa + incr);
6332	}
6333out_now:
6334	return (added);
6335}
6336
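/*
 * sctp_connectx() helper: validate and count the IPv4/IPv6 addresses in
 * the packed list and check each one for an already existing
 * association. Returns the existing stcb if one is found, otherwise
 * NULL with *totaddr trimmed to the number of addresses examined.
 */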
6337struct sctp_tcb *
6338sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6339    unsigned int *totaddr,
6340    unsigned int *num_v4, unsigned int *num_v6, int *error,
6341    unsigned int limit, int *bad_addr)
6342{
6343	struct sockaddr *sa;
6344	struct sctp_tcb *stcb = NULL;
6345	unsigned int incr, at, i;
6346
6347	at = 0;
6348	sa = addr;
6349	*error = *num_v6 = *num_v4 = 0;
6350	/* account and validate addresses */
6351	for (i = 0; i < *totaddr; i++) {
6352		switch (sa->sa_family) {
6353#ifdef INET
6354		case AF_INET:
6355			incr = (unsigned int)sizeof(struct sockaddr_in);
6356			if (sa->sa_len != incr) {
6357				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6358				*error = EINVAL;
6359				*bad_addr = 1;
6360				return (NULL);
6361			}
6362			(*num_v4) += 1;
6363			break;
6364#endif
6365#ifdef INET6
6366		case AF_INET6:
6367			{
6368				struct sockaddr_in6 *sin6;
6369
6370				sin6 = (struct sockaddr_in6 *)sa;
6371				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6372					/* Must be non-mapped for connectx */
6373					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6374					*error = EINVAL;
6375					*bad_addr = 1;
6376					return (NULL);
6377				}
6378				incr = (unsigned int)sizeof(struct sockaddr_in6);
6379				if (sa->sa_len != incr) {
6380					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6381					*error = EINVAL;
6382					*bad_addr = 1;
6383					return (NULL);
6384				}
6385				(*num_v6) += 1;
6386				break;
6387			}
6388#endif
6389		default:
6390			*totaddr = i;
6391			incr = 0;
6392			/* we are done */
6393			break;
6394		}
6395		if (i == *totaddr) {
6396			break;
6397		}
6398		SCTP_INP_INCR_REF(inp);
6399		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6400		if (stcb != NULL) {
6401			/* Already have, or am bringing up, an association */
6402			return (stcb);
6403		} else {
6404			SCTP_INP_DECR_REF(inp);
6405		}
6406		if ((at + incr) > limit) {
6407			*totaddr = i;
6408			break;
6409		}
6410		sa = (struct sockaddr *)((caddr_t)sa + incr);
6411	}
6412	return ((struct sctp_tcb *)NULL);
6413}
6414
6415/*
6416 * sctp_bindx(ADD) for one address.
6417 * assumes all arguments are valid/checked by caller.
6418 */
6419void
6420sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6421    struct sockaddr *sa, sctp_assoc_t assoc_id,
6422    uint32_t vrf_id, int *error, void *p)
6423{
6424	struct sockaddr *addr_touse;
6425#if defined(INET) && defined(INET6)
6426	struct sockaddr_in sin;
6427#endif
6428
6429	/* see if we're bound all already! */
6430	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6431		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6432		*error = EINVAL;
6433		return;
6434	}
6435	addr_touse = sa;
6436#ifdef INET6
6437	if (sa->sa_family == AF_INET6) {
6438#ifdef INET
6439		struct sockaddr_in6 *sin6;
6440
6441#endif
6442		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6443			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6444			*error = EINVAL;
6445			return;
6446		}
6447		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6448			/* can only bind v6 on PF_INET6 sockets */
6449			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6450			*error = EINVAL;
6451			return;
6452		}
6453#ifdef INET
6454		sin6 = (struct sockaddr_in6 *)addr_touse;
6455		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6456			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6457			    SCTP_IPV6_V6ONLY(inp)) {
6458				/* can't bind v4-mapped on PF_INET sockets */
6459				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6460				*error = EINVAL;
6461				return;
6462			}
6463			in6_sin6_2_sin(&sin, sin6);
6464			addr_touse = (struct sockaddr *)&sin;
6465		}
6466#endif
6467	}
6468#endif
6469#ifdef INET
6470	if (sa->sa_family == AF_INET) {
6471		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6472			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6473			*error = EINVAL;
6474			return;
6475		}
6476		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6477		    SCTP_IPV6_V6ONLY(inp)) {
6478			/* can't bind v4 on PF_INET sockets */
6479			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6480			*error = EINVAL;
6481			return;
6482		}
6483	}
6484#endif
6485	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6486		if (p == NULL) {
6487			/* Can't get proc for Net/Open BSD */
6488			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6489			*error = EINVAL;
6490			return;
6491		}
6492		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6493		return;
6494	}
6495	/*
6496	 * No locks required here since bind and mgmt_ep_sa all do their own
6497	 * locking. If we do something for the FIX: below we may need to
6498	 * lock in that case.
6499	 */
6500	if (assoc_id == 0) {
6501		/* add the address */
6502		struct sctp_inpcb *lep;
6503		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6504
6505		/* validate the incoming port */
6506		if ((lsin->sin_port != 0) &&
6507		    (lsin->sin_port != inp->sctp_lport)) {
6508			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6509			*error = EINVAL;
6510			return;
6511		} else {
6512			/* user specified 0 port, set it to existing port */
6513			lsin->sin_port = inp->sctp_lport;
6514		}
6515
6516		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6517		if (lep != NULL) {
6518			/*
6519			 * We must decrement the refcount since we have the
6520			 * ep already and are binding. No remove going on
6521			 * here.
6522			 */
6523			SCTP_INP_DECR_REF(lep);
6524		}
6525		if (lep == inp) {
6526			/* already bound to it.. ok */
6527			return;
6528		} else if (lep == NULL) {
6529			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6530			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6531			    SCTP_ADD_IP_ADDRESS,
6532			    vrf_id, NULL);
6533		} else {
6534			*error = EADDRINUSE;
6535		}
6536		if (*error)
6537			return;
6538	} else {
6539		/*
6540		 * FIX: decide whether we allow assoc based bindx
6541		 */
6542	}
6543}
6544
6545/*
6546 * sctp_bindx(DELETE) for one address.
6547 * assumes all arguments are valid/checked by caller.
6548 */
6549void
6550sctp_bindx_delete_address(struct sctp_inpcb *inp,
6551    struct sockaddr *sa, sctp_assoc_t assoc_id,
6552    uint32_t vrf_id, int *error)
6553{
6554	struct sockaddr *addr_touse;
6555#if defined(INET) && defined(INET6)
6556	struct sockaddr_in sin;
6557#endif
6558
6559	/* see if we're bound all already! */
6560	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6561		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6562		*error = EINVAL;
6563		return;
6564	}
6565	addr_touse = sa;
6566#ifdef INET6
6567	if (sa->sa_family == AF_INET6) {
6568#ifdef INET
6569		struct sockaddr_in6 *sin6;
6570#endif
6571
6572		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6573			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6574			*error = EINVAL;
6575			return;
6576		}
6577		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6578			/* can only bind v6 on PF_INET6 sockets */
6579			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6580			*error = EINVAL;
6581			return;
6582		}
6583#ifdef INET
6584		sin6 = (struct sockaddr_in6 *)addr_touse;
6585		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6586			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6587			    SCTP_IPV6_V6ONLY(inp)) {
6588				/* can't bind mapped-v4 on PF_INET sockets */
6589				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590				*error = EINVAL;
6591				return;
6592			}
6593			in6_sin6_2_sin(&sin, sin6);
6594			addr_touse = (struct sockaddr *)&sin;
6595		}
6596#endif
6597	}
6598#endif
6599#ifdef INET
6600	if (sa->sa_family == AF_INET) {
6601		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6602			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6603			*error = EINVAL;
6604			return;
6605		}
6606		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6607		    SCTP_IPV6_V6ONLY(inp)) {
6608			/* can't bind v4 on PF_INET sockets */
6609			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6610			*error = EINVAL;
6611			return;
6612		}
6613	}
6614#endif
6615	/*
6616	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6617	 * below is ever changed we may need to lock before calling
6618	 * association level binding.
6619	 */
6620	if (assoc_id == 0) {
6621		/* delete the address */
6622		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6623		    SCTP_DEL_IP_ADDRESS,
6624		    vrf_id, NULL);
6625	} else {
6626		/*
6627		 * FIX: decide whether we allow assoc based bindx
6628		 */
6629	}
6630}
6631
6632/*
6633 * returns the valid local address count for an assoc, taking into account
6634 * all scoping rules
6635 */
6636int
6637sctp_local_addr_count(struct sctp_tcb *stcb)
6638{
6639	int loopback_scope;
6640#if defined(INET)
6641	int ipv4_local_scope, ipv4_addr_legal;
6642#endif
6643#if defined (INET6)
6644	int local_scope, site_scope, ipv6_addr_legal;
6645#endif
6646	struct sctp_vrf *vrf;
6647	struct sctp_ifn *sctp_ifn;
6648	struct sctp_ifa *sctp_ifa;
6649	int count = 0;
6650
6651	/* Turn on all the appropriate scopes */
6652	loopback_scope = stcb->asoc.scope.loopback_scope;
6653#if defined(INET)
6654	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6655	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6656#endif
6657#if defined(INET6)
6658	local_scope = stcb->asoc.scope.local_scope;
6659	site_scope = stcb->asoc.scope.site_scope;
6660	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6661#endif
6662	SCTP_IPI_ADDR_RLOCK();
6663	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6664	if (vrf == NULL) {
6665		/* no vrf, no addresses */
6666		SCTP_IPI_ADDR_RUNLOCK();
6667		return (0);
6668	}
6669	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6670		/*
6671		 * bound all case: go through all ifns on the vrf
6672		 */
6673		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6674			if ((loopback_scope == 0) &&
6675			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6676				continue;
6677			}
6678			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6679				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6680					continue;
6681				switch (sctp_ifa->address.sa.sa_family) {
6682#ifdef INET
6683				case AF_INET:
6684					if (ipv4_addr_legal) {
6685						struct sockaddr_in *sin;
6686
6687						sin = &sctp_ifa->address.sin;
6688						if (sin->sin_addr.s_addr == 0) {
6689							/*
6690							 * skip unspecified
6691							 * addrs
6692							 */
6693							continue;
6694						}
6695						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6696						    &sin->sin_addr) != 0) {
6697							continue;
6698						}
6699						if ((ipv4_local_scope == 0) &&
6700						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6701							continue;
6702						}
6703						/* count this one */
6704						count++;
6705					} else {
6706						continue;
6707					}
6708					break;
6709#endif
6710#ifdef INET6
6711				case AF_INET6:
6712					if (ipv6_addr_legal) {
6713						struct sockaddr_in6 *sin6;
6714
6715						sin6 = &sctp_ifa->address.sin6;
6716						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6717							continue;
6718						}
6719						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6720						    &sin6->sin6_addr) != 0) {
6721							continue;
6722						}
6723						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6724							if (local_scope == 0)
6725								continue;
6726							if (sin6->sin6_scope_id == 0) {
6727								if (sa6_recoverscope(sin6) != 0)
6728								/* bad link local address */
6737									continue;
6738							}
6739						}
6740						if ((site_scope == 0) &&
6741						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6742							continue;
6743						}
6744						/* count this one */
6745						count++;
6746					}
6747					break;
6748#endif
6749				default:
6750					/* TSNH */
6751					break;
6752				}
6753			}
6754		}
6755	} else {
6756		/*
6757		 * subset bound case
6758		 */
6759		struct sctp_laddr *laddr;
6760
6761		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6762		    sctp_nxt_addr) {
6763			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6764				continue;
6765			}
6766			/* count this one */
6767			count++;
6768		}
6769	}
6770	SCTP_IPI_ADDR_RUNLOCK();
6771	return (count);
6772}
6773
6774#if defined(SCTP_LOCAL_TRACE_BUF)
6775
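/*
 * Record one entry in the circular SCTP trace log. The index is
 * advanced with a CAS loop so concurrent callers each claim a
 * distinct slot.
 */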
6776void
6777sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6778{
6779	uint32_t saveindex, newindex;
6780
6781	do {
6782		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6783		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6784			newindex = 1;
6785		} else {
6786			newindex = saveindex + 1;
6787		}
6788	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6789	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6790		saveindex = 0;
6791	}
6792	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6793	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6794	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6795	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6796	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6797	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6798	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6799	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6800}
6801
6802#endif
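
/*
 * Tunneling callback: strip the UDP header from a UDP-encapsulated SCTP
 * packet, fix up the IP/IPv6 payload length, and feed the packet to the
 * normal SCTP input path together with the UDP source port used by the
 * peer for encapsulation.
 */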
6803static void
6804sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6805    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6806{
6807	struct ip *iph;
6808#ifdef INET6
6809	struct ip6_hdr *ip6;
6810#endif
6811	struct mbuf *sp, *last;
6812	struct udphdr *uhdr;
6813	uint16_t port;
6814
6815	if ((m->m_flags & M_PKTHDR) == 0) {
6816		/* Can't handle one that is not a pkt hdr */
6817		goto out;
6818	}
6819	/* Pull the src port */
6820	iph = mtod(m, struct ip *);
6821	uhdr = (struct udphdr *)((caddr_t)iph + off);
6822	port = uhdr->uh_sport;
6823	/*
6824	 * Split out the mbuf chain. Leave the IP header in m, place the
6825	 * rest in the sp.
6826	 */
6827	sp = m_split(m, off, M_NOWAIT);
6828	if (sp == NULL) {
6829		/* Gak, drop packet, we can't do a split */
6830		goto out;
6831	}
6832	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6833		/* Gak, packet can't have an SCTP header in it - too small */
6834		m_freem(sp);
6835		goto out;
6836	}
6837	/* Now pull up the UDP header and SCTP header together */
6838	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6839	if (sp == NULL) {
6840		/* Gak pullup failed */
6841		goto out;
6842	}
6843	/* Trim out the UDP header */
6844	m_adj(sp, sizeof(struct udphdr));
6845
6846	/* Now reconstruct the mbuf chain */
6847	for (last = m; last->m_next; last = last->m_next);
6848	last->m_next = sp;
6849	m->m_pkthdr.len += sp->m_pkthdr.len;
6850	/*
6851	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6852	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6853	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6854	 * SCTP checksum. Therefore, clear the bit.
6855	 */
6856	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6857	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6858	    m->m_pkthdr.len,
6859	    if_name(m->m_pkthdr.rcvif),
6860	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6861	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6862	iph = mtod(m, struct ip *);
6863	switch (iph->ip_v) {
6864#ifdef INET
6865	case IPVERSION:
6866		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6867		sctp_input_with_port(m, off, port);
6868		break;
6869#endif
6870#ifdef INET6
6871	case IPV6_VERSION >> 4:
6872		ip6 = mtod(m, struct ip6_hdr *);
6873		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6874		sctp6_input_with_port(&m, &off, port);
6875		break;
6876#endif
6877	default:
6878		goto out;
6879		break;
6880	}
6881	return;
6882out:
6883	m_freem(m);
6884}
6885
6886#ifdef INET
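/*
 * ICMP error callback for UDP-encapsulated SCTP over IPv4: locate the
 * association from the embedded IP/UDP/SCTP headers, verify the UDP
 * ports and the verification tag (or the initiate tag of an embedded
 * INIT chunk), and hand the error to sctp_notify().
 */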
6887static void
6888sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6889{
6890	struct ip *outer_ip, *inner_ip;
6891	struct sctphdr *sh;
6892	struct icmp *icmp;
6893	struct udphdr *udp;
6894	struct sctp_inpcb *inp;
6895	struct sctp_tcb *stcb;
6896	struct sctp_nets *net;
6897	struct sctp_init_chunk *ch;
6898	struct sockaddr_in src, dst;
6899	uint8_t type, code;
6900
6901	inner_ip = (struct ip *)vip;
6902	icmp = (struct icmp *)((caddr_t)inner_ip -
6903	    (sizeof(struct icmp) - sizeof(struct ip)));
6904	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6905	if (ntohs(outer_ip->ip_len) <
6906	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6907		return;
6908	}
6909	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6910	sh = (struct sctphdr *)(udp + 1);
6911	memset(&src, 0, sizeof(struct sockaddr_in));
6912	src.sin_family = AF_INET;
6913	src.sin_len = sizeof(struct sockaddr_in);
6914	src.sin_port = sh->src_port;
6915	src.sin_addr = inner_ip->ip_src;
6916	memset(&dst, 0, sizeof(struct sockaddr_in));
6917	dst.sin_family = AF_INET;
6918	dst.sin_len = sizeof(struct sockaddr_in);
6919	dst.sin_port = sh->dest_port;
6920	dst.sin_addr = inner_ip->ip_dst;
6921	/*
6922	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6923	 * holds our local endpoint address. Thus we reverse the dst and the
6924	 * src in the lookup.
6925	 */
6926	inp = NULL;
6927	net = NULL;
6928	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6929	    (struct sockaddr *)&src,
6930	    &inp, &net, 1,
6931	    SCTP_DEFAULT_VRFID);
6932	if ((stcb != NULL) &&
6933	    (net != NULL) &&
6934	    (inp != NULL)) {
6935		/* Check the UDP port numbers */
6936		if ((udp->uh_dport != net->port) ||
6937		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6938			SCTP_TCB_UNLOCK(stcb);
6939			return;
6940		}
6941		/* Check the verification tag */
6942		if (ntohl(sh->v_tag) != 0) {
6943			/*
6944			 * This must be the verification tag used for
6945			 * sending out packets. We don't consider packets
6946			 * reflecting the verification tag.
6947			 */
6948			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6949				SCTP_TCB_UNLOCK(stcb);
6950				return;
6951			}
6952		} else {
6953			if (ntohs(outer_ip->ip_len) >=
6954			    sizeof(struct ip) +
6955			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6956				/*
6957				 * In this case we can check if we got an
6958				 * INIT chunk and if the initiate tag
6959				 * matches.
6960				 */
6961				ch = (struct sctp_init_chunk *)(sh + 1);
6962				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6963				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6964					SCTP_TCB_UNLOCK(stcb);
6965					return;
6966				}
6967			} else {
6968				SCTP_TCB_UNLOCK(stcb);
6969				return;
6970			}
6971		}
6972		type = icmp->icmp_type;
6973		code = icmp->icmp_code;
6974		if ((type == ICMP_UNREACH) &&
6975		    (code == ICMP_UNREACH_PORT)) {
6976			code = ICMP_UNREACH_PROTOCOL;
6977		}
6978		sctp_notify(inp, stcb, net, type, code,
6979		    ntohs(inner_ip->ip_len),
6980		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6981	} else {
6982		if ((stcb == NULL) && (inp != NULL)) {
6983			/* reduce ref-count */
6984			SCTP_INP_WLOCK(inp);
6985			SCTP_INP_DECR_REF(inp);
6986			SCTP_INP_WUNLOCK(inp);
6987		}
6988		if (stcb) {
6989			SCTP_TCB_UNLOCK(stcb);
6990		}
6991	}
6992	return;
6993}
6994#endif
6995
6996#ifdef INET6
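/*
 * ICMPv6 error callback for UDP-encapsulated SCTP: validate the embedded
 * UDP/SCTP headers (ports and verification or initiate tag) and pass the
 * error to sctp6_notify().
 */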
6997static void
6998sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6999{
7000	struct ip6ctlparam *ip6cp;
7001	struct sctp_inpcb *inp;
7002	struct sctp_tcb *stcb;
7003	struct sctp_nets *net;
7004	struct sctphdr sh;
7005	struct udphdr udp;
7006	struct sockaddr_in6 src, dst;
7007	uint8_t type, code;
7008
7009	ip6cp = (struct ip6ctlparam *)d;
7010	/*
7011	 * XXX: We assume that when IPV6 is non-NULL, M and OFF are valid.
7012	 */
7013	if (ip6cp->ip6c_m == NULL) {
7014		return;
7015	}
7016	/*
7017	 * Check if we can safely examine the ports and the verification tag
7018	 * of the SCTP common header.
7019	 */
7020	if (ip6cp->ip6c_m->m_pkthdr.len <
7021	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7022		return;
7023	}
7024	/* Copy out the UDP header. */
7025	memset(&udp, 0, sizeof(struct udphdr));
7026	m_copydata(ip6cp->ip6c_m,
7027	    ip6cp->ip6c_off,
7028	    sizeof(struct udphdr),
7029	    (caddr_t)&udp);
7030	/* Copy out the port numbers and the verification tag. */
7031	memset(&sh, 0, sizeof(struct sctphdr));
7032	m_copydata(ip6cp->ip6c_m,
7033	    ip6cp->ip6c_off + sizeof(struct udphdr),
7034	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7035	    (caddr_t)&sh);
7036	memset(&src, 0, sizeof(struct sockaddr_in6));
7037	src.sin6_family = AF_INET6;
7038	src.sin6_len = sizeof(struct sockaddr_in6);
7039	src.sin6_port = sh.src_port;
7040	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7041	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7042		return;
7043	}
7044	memset(&dst, 0, sizeof(struct sockaddr_in6));
7045	dst.sin6_family = AF_INET6;
7046	dst.sin6_len = sizeof(struct sockaddr_in6);
7047	dst.sin6_port = sh.dest_port;
7048	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7049	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7050		return;
7051	}
7052	inp = NULL;
7053	net = NULL;
7054	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7055	    (struct sockaddr *)&src,
7056	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7057	if ((stcb != NULL) &&
7058	    (net != NULL) &&
7059	    (inp != NULL)) {
7060		/* Check the UDP port numbers */
7061		if ((udp.uh_dport != net->port) ||
7062		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7063			SCTP_TCB_UNLOCK(stcb);
7064			return;
7065		}
7066		/* Check the verification tag */
7067		if (ntohl(sh.v_tag) != 0) {
7068			/*
7069			 * This must be the verification tag used for
7070			 * sending out packets. We don't consider packets
7071			 * reflecting the verification tag.
7072			 */
7073			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7074				SCTP_TCB_UNLOCK(stcb);
7075				return;
7076			}
7077		} else {
7078			if (ip6cp->ip6c_m->m_pkthdr.len >=
7079			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7080			    sizeof(struct sctphdr) +
7081			    sizeof(struct sctp_chunkhdr) +
7082			    offsetof(struct sctp_init, a_rwnd)) {
7083				/*
7084				 * In this case we can check if we got an
7085				 * INIT chunk and if the initiate tag
7086				 * matches.
7087				 */
7088				uint32_t initiate_tag;
7089				uint8_t chunk_type;
7090
7091				m_copydata(ip6cp->ip6c_m,
7092				    ip6cp->ip6c_off +
7093				    sizeof(struct udphdr) +
7094				    sizeof(struct sctphdr),
7095				    sizeof(uint8_t),
7096				    (caddr_t)&chunk_type);
7097				m_copydata(ip6cp->ip6c_m,
7098				    ip6cp->ip6c_off +
7099				    sizeof(struct udphdr) +
7100				    sizeof(struct sctphdr) +
7101				    sizeof(struct sctp_chunkhdr),
7102				    sizeof(uint32_t),
7103				    (caddr_t)&initiate_tag);
7104				if ((chunk_type != SCTP_INITIATION) ||
7105				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7106					SCTP_TCB_UNLOCK(stcb);
7107					return;
7108				}
7109			} else {
7110				SCTP_TCB_UNLOCK(stcb);
7111				return;
7112			}
7113		}
7114		type = ip6cp->ip6c_icmp6->icmp6_type;
7115		code = ip6cp->ip6c_icmp6->icmp6_code;
7116		if ((type == ICMP6_DST_UNREACH) &&
7117		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7118			type = ICMP6_PARAM_PROB;
7119			code = ICMP6_PARAMPROB_NEXTHEADER;
7120		}
7121		sctp6_notify(inp, stcb, net, type, code,
7122		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7123	} else {
7124		if ((stcb == NULL) && (inp != NULL)) {
7125			/* reduce inp's ref-count */
7126			SCTP_INP_WLOCK(inp);
7127			SCTP_INP_DECR_REF(inp);
7128			SCTP_INP_WUNLOCK(inp);
7129		}
7130		if (stcb) {
7131			SCTP_TCB_UNLOCK(stcb);
7132		}
7133	}
7134}
7135#endif
7136
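/*
 * Tear down the UDP tunneling sockets created by sctp_over_udp_start().
 */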
7137void
7138sctp_over_udp_stop(void)
7139{
7140	/*
7141	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7142	 * for writing!
7143	 */
7144#ifdef INET
7145	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7146		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7147		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7148	}
7149#endif
7150#ifdef INET6
7151	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7152		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7153		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7154	}
7155#endif
7156}
7157
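/*
 * Bring up SCTP over UDP: create the IPv4/IPv6 tunneling sockets,
 * register the encapsulated-packet and ICMP callbacks with the UDP
 * layer, and bind the sockets to the configured tunneling port.
 */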
7158int
7159sctp_over_udp_start(void)
7160{
7161	uint16_t port;
7162	int ret;
7163#ifdef INET
7164	struct sockaddr_in sin;
7165#endif
7166#ifdef INET6
7167	struct sockaddr_in6 sin6;
7168#endif
7169	/*
7170	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7171	 * for writing!
7172	 */
7173	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7174	if (ntohs(port) == 0) {
7175		/* Must have a port set */
7176		return (EINVAL);
7177	}
7178#ifdef INET
7179	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7180		/* Already running -- must stop first */
7181		return (EALREADY);
7182	}
7183#endif
7184#ifdef INET6
7185	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7186		/* Already running -- must stop first */
7187		return (EALREADY);
7188	}
7189#endif
7190#ifdef INET
7191	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7192	    SOCK_DGRAM, IPPROTO_UDP,
7193	    curthread->td_ucred, curthread))) {
7194		sctp_over_udp_stop();
7195		return (ret);
7196	}
7197	/* Call the special UDP hook. */
7198	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7199	    sctp_recv_udp_tunneled_packet,
7200	    sctp_recv_icmp_tunneled_packet,
7201	    NULL))) {
7202		sctp_over_udp_stop();
7203		return (ret);
7204	}
7205	/* Ok, we have a socket, bind it to the port. */
7206	memset(&sin, 0, sizeof(struct sockaddr_in));
7207	sin.sin_len = sizeof(struct sockaddr_in);
7208	sin.sin_family = AF_INET;
7209	sin.sin_port = htons(port);
7210	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7211	    (struct sockaddr *)&sin, curthread))) {
7212		sctp_over_udp_stop();
7213		return (ret);
7214	}
7215#endif
7216#ifdef INET6
7217	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7218	    SOCK_DGRAM, IPPROTO_UDP,
7219	    curthread->td_ucred, curthread))) {
7220		sctp_over_udp_stop();
7221		return (ret);
7222	}
7223	/* Call the special UDP hook. */
7224	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7225	    sctp_recv_udp_tunneled_packet,
7226	    sctp_recv_icmp6_tunneled_packet,
7227	    NULL))) {
7228		sctp_over_udp_stop();
7229		return (ret);
7230	}
7231	/* Ok, we have a socket, bind it to the port. */
7232	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7233	sin6.sin6_len = sizeof(struct sockaddr_in6);
7234	sin6.sin6_family = AF_INET6;
7235	sin6.sin6_port = htons(port);
7236	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7237	    (struct sockaddr *)&sin6, curthread))) {
7238		sctp_over_udp_stop();
7239		return (ret);
7240	}
7241#endif
7242	return (0);
7243}
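
/*
 * Illustrative sketch (not part of this file): how a sysctl handler might
 * drive sctp_over_udp_stop()/sctp_over_udp_start() when the tunneling port
 * changes. The helper name and the SCTP_INP_INFO_WLOCK()/WUNLOCK() macros
 * standing in for the "lock for writing" mentioned above are assumptions;
 * the real handler lives in sctp_sysctl.c and may differ.
 *
 *	static int
 *	sctp_udp_tunneling_set_port(uint32_t new_port)
 *	{
 *		int error = 0;
 *
 *		SCTP_INP_INFO_WLOCK();
 *		sctp_over_udp_stop();
 *		SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = new_port;
 *		if (new_port != 0) {
 *			error = sctp_over_udp_start();
 *		}
 *		SCTP_INP_INFO_WUNLOCK();
 *		return (error);
 *	}
 */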
7244
7245#if defined(INET6) || defined(INET)
7246
7247/*
7248 * sctp_min_mtu() returns the minimum of all non-zero arguments.
7249 * If all arguments are zero, zero is returned.
7250 */
7251uint32_t
7252sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7253{
7254	if (mtu1 > 0) {
7255		if (mtu2 > 0) {
7256			if (mtu3 > 0) {
7257				return (min(mtu1, min(mtu2, mtu3)));
7258			} else {
7259				return (min(mtu1, mtu2));
7260			}
7261		} else {
7262			if (mtu3 > 0) {
7263				return (min(mtu1, mtu3));
7264			} else {
7265				return (mtu1);
7266			}
7267		}
7268	} else {
7269		if (mtu2 > 0) {
7270			if (mtu3 > 0) {
7271				return (min(mtu2, mtu3));
7272			} else {
7273				return (mtu2);
7274			}
7275		} else {
7276			return (mtu3);
7277		}
7278	}
7279}
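
/*
 * Examples (added for illustration) of the case analysis above:
 *
 *	sctp_min_mtu(1500, 1280, 9000) == 1280
 *	sctp_min_mtu(1500, 0, 1280)    == 1280
 *	sctp_min_mtu(0, 0, 1500)       == 1500
 *	sctp_min_mtu(0, 0, 0)          == 0
 */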
7280
7281void
7282sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7283{
7284	struct in_conninfo inc;
7285
7286	memset(&inc, 0, sizeof(struct in_conninfo));
7287	inc.inc_fibnum = fibnum;
7288	switch (addr->sa.sa_family) {
7289#ifdef INET
7290	case AF_INET:
7291		inc.inc_faddr = addr->sin.sin_addr;
7292		break;
7293#endif
7294#ifdef INET6
7295	case AF_INET6:
7296		inc.inc_flags |= INC_ISIPV6;
7297		inc.inc6_faddr = addr->sin6.sin6_addr;
7298		break;
7299#endif
7300	default:
7301		return;
7302	}
7303	tcp_hc_updatemtu(&inc, (u_long)mtu);
7304}
7305
7306uint32_t
7307sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7308{
7309	struct in_conninfo inc;
7310
7311	memset(&inc, 0, sizeof(struct in_conninfo));
7312	inc.inc_fibnum = fibnum;
7313	switch (addr->sa.sa_family) {
7314#ifdef INET
7315	case AF_INET:
7316		inc.inc_faddr = addr->sin.sin_addr;
7317		break;
7318#endif
7319#ifdef INET6
7320	case AF_INET6:
7321		inc.inc_flags |= INC_ISIPV6;
7322		inc.inc6_faddr = addr->sin6.sin6_addr;
7323		break;
7324#endif
7325	default:
7326		return (0);
7327	}
7328	return ((uint32_t)tcp_hc_getmtu(&inc));
7329}
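
/*
 * The two helpers above store and retrieve a path MTU in the host cache
 * shared with TCP (tcp_hc_updatemtu()/tcp_hc_getmtu()), keyed by peer
 * address and FIB number. Illustrative round trip (the destination address
 * and MTU value are made up):
 *
 *	union sctp_sockstore peer;
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.sin.sin_len = sizeof(struct sockaddr_in);
 *	peer.sin.sin_family = AF_INET;
 *	peer.sin.sin_addr.s_addr = htonl(0xc0000201);	// 192.0.2.1, example only
 *	sctp_hc_set_mtu(&peer, 0, 1200);
 *	// Later, a lookup for the same peer and FIB should return 1200, or 0
 *	// if no entry is (still) cached.
 *	(void)sctp_hc_get_mtu(&peer, 0);
 */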
7330#endif
7331