/* sctputil.c, FreeBSD revision 270361 */
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/netinet/sctputil.c 270361 2014-08-22 20:16:26Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/udp.h>
53#include <netinet/udp_var.h>
54#include <sys/proc.h>
55
56
57#ifndef KTR_SCTP
58#define KTR_SCTP KTR_SUBSYS
59#endif
60
61extern struct sctp_cc_functions sctp_cc_functions[];
62extern struct sctp_ss_functions sctp_ss_functions[];
63
64void
65sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66{
67	struct sctp_cwnd_log sctp_clog;
68
69	sctp_clog.x.sb.stcb = stcb;
70	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71	if (stcb)
72		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73	else
74		sctp_clog.x.sb.stcb_sbcc = 0;
75	sctp_clog.x.sb.incr = incr;
76	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77	    SCTP_LOG_EVENT_SB,
78	    from,
79	    sctp_clog.x.misc.log1,
80	    sctp_clog.x.misc.log2,
81	    sctp_clog.x.misc.log3,
82	    sctp_clog.x.misc.log4);
83}
84
85void
86sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87{
88	struct sctp_cwnd_log sctp_clog;
89
90	sctp_clog.x.close.inp = (void *)inp;
91	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92	if (stcb) {
93		sctp_clog.x.close.stcb = (void *)stcb;
94		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95	} else {
96		sctp_clog.x.close.stcb = 0;
97		sctp_clog.x.close.state = 0;
98	}
99	sctp_clog.x.close.loc = loc;
100	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101	    SCTP_LOG_EVENT_CLOSE,
102	    0,
103	    sctp_clog.x.misc.log1,
104	    sctp_clog.x.misc.log2,
105	    sctp_clog.x.misc.log3,
106	    sctp_clog.x.misc.log4);
107}
108
109void
110rto_logging(struct sctp_nets *net, int from)
111{
112	struct sctp_cwnd_log sctp_clog;
113
114	memset(&sctp_clog, 0, sizeof(sctp_clog));
115	sctp_clog.x.rto.net = (void *)net;
116	sctp_clog.x.rto.rtt = net->rtt / 1000;
117	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118	    SCTP_LOG_EVENT_RTT,
119	    from,
120	    sctp_clog.x.misc.log1,
121	    sctp_clog.x.misc.log2,
122	    sctp_clog.x.misc.log3,
123	    sctp_clog.x.misc.log4);
124}
125
126void
127sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128{
129	struct sctp_cwnd_log sctp_clog;
130
131	sctp_clog.x.strlog.stcb = stcb;
132	sctp_clog.x.strlog.n_tsn = tsn;
133	sctp_clog.x.strlog.n_sseq = sseq;
134	sctp_clog.x.strlog.e_tsn = 0;
135	sctp_clog.x.strlog.e_sseq = 0;
136	sctp_clog.x.strlog.strm = stream;
137	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138	    SCTP_LOG_EVENT_STRM,
139	    from,
140	    sctp_clog.x.misc.log1,
141	    sctp_clog.x.misc.log2,
142	    sctp_clog.x.misc.log3,
143	    sctp_clog.x.misc.log4);
144}
145
146void
147sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148{
149	struct sctp_cwnd_log sctp_clog;
150
151	sctp_clog.x.nagle.stcb = (void *)stcb;
152	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157	    SCTP_LOG_EVENT_NAGLE,
158	    action,
159	    sctp_clog.x.misc.log1,
160	    sctp_clog.x.misc.log2,
161	    sctp_clog.x.misc.log3,
162	    sctp_clog.x.misc.log4);
163}
164
165void
166sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167{
168	struct sctp_cwnd_log sctp_clog;
169
170	sctp_clog.x.sack.cumack = cumack;
171	sctp_clog.x.sack.oldcumack = old_cumack;
172	sctp_clog.x.sack.tsn = tsn;
173	sctp_clog.x.sack.numGaps = gaps;
174	sctp_clog.x.sack.numDups = dups;
175	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176	    SCTP_LOG_EVENT_SACK,
177	    from,
178	    sctp_clog.x.misc.log1,
179	    sctp_clog.x.misc.log2,
180	    sctp_clog.x.misc.log3,
181	    sctp_clog.x.misc.log4);
182}
183
184void
185sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186{
187	struct sctp_cwnd_log sctp_clog;
188
189	memset(&sctp_clog, 0, sizeof(sctp_clog));
190	sctp_clog.x.map.base = map;
191	sctp_clog.x.map.cum = cum;
192	sctp_clog.x.map.high = high;
193	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194	    SCTP_LOG_EVENT_MAP,
195	    from,
196	    sctp_clog.x.misc.log1,
197	    sctp_clog.x.misc.log2,
198	    sctp_clog.x.misc.log3,
199	    sctp_clog.x.misc.log4);
200}
201
202void
203sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204{
205	struct sctp_cwnd_log sctp_clog;
206
207	memset(&sctp_clog, 0, sizeof(sctp_clog));
208	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210	sctp_clog.x.fr.tsn = tsn;
211	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212	    SCTP_LOG_EVENT_FR,
213	    from,
214	    sctp_clog.x.misc.log1,
215	    sctp_clog.x.misc.log2,
216	    sctp_clog.x.misc.log3,
217	    sctp_clog.x.misc.log4);
218}
219
220void
221sctp_log_mb(struct mbuf *m, int from)
222{
223	struct sctp_cwnd_log sctp_clog;
224
225	sctp_clog.x.mb.mp = m;
226	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
227	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
228	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
229	if (SCTP_BUF_IS_EXTENDED(m)) {
230		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
231		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
232	} else {
233		sctp_clog.x.mb.ext = 0;
234		sctp_clog.x.mb.refcnt = 0;
235	}
236	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
237	    SCTP_LOG_EVENT_MBUF,
238	    from,
239	    sctp_clog.x.misc.log1,
240	    sctp_clog.x.misc.log2,
241	    sctp_clog.x.misc.log3,
242	    sctp_clog.x.misc.log4);
243}
244
245void
246sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
247{
248	struct sctp_cwnd_log sctp_clog;
249
250	if (control == NULL) {
251		SCTP_PRINTF("Gak log of NULL?\n");
252		return;
253	}
254	sctp_clog.x.strlog.stcb = control->stcb;
255	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257	sctp_clog.x.strlog.strm = control->sinfo_stream;
258	if (poschk != NULL) {
259		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261	} else {
262		sctp_clog.x.strlog.e_tsn = 0;
263		sctp_clog.x.strlog.e_sseq = 0;
264	}
265	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266	    SCTP_LOG_EVENT_STRM,
267	    from,
268	    sctp_clog.x.misc.log1,
269	    sctp_clog.x.misc.log2,
270	    sctp_clog.x.misc.log3,
271	    sctp_clog.x.misc.log4);
272}
273
274void
275sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276{
277	struct sctp_cwnd_log sctp_clog;
278
279	sctp_clog.x.cwnd.net = net;
280	if (stcb->asoc.send_queue_cnt > 255)
281		sctp_clog.x.cwnd.cnt_in_send = 255;
282	else
283		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284	if (stcb->asoc.stream_queue_cnt > 255)
285		sctp_clog.x.cwnd.cnt_in_str = 255;
286	else
287		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288
289	if (net) {
290		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291		sctp_clog.x.cwnd.inflight = net->flight_size;
292		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295	}
296	if (SCTP_CWNDLOG_PRESEND == from) {
297		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298	}
299	sctp_clog.x.cwnd.cwnd_augment = augment;
300	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301	    SCTP_LOG_EVENT_CWND,
302	    from,
303	    sctp_clog.x.misc.log1,
304	    sctp_clog.x.misc.log2,
305	    sctp_clog.x.misc.log3,
306	    sctp_clog.x.misc.log4);
307}
308
/*
 * Log the ownership state of every lock relevant to this inp/stcb pair
 * (endpoint, association, create, global info, and socket buffer locks).
 * NULL inp/stcb are tolerated; missing state is logged as
 * SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock: write-ownership only. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — possibly intentional (the receive buffer
		 * mutex doubles as the socket lock here), but confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
352
/*
 * Log a max-burst limiting event, reusing the cwnd log layout (error code
 * in cwnd_new_value, burst limit in cwnd_augment).
 * NOTE(review): net is dereferenced unconditionally — callers must pass a
 * non-NULL net (unlike sctp_log_cwnd()).
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* Queue counts are clamped to 255 to fit the 8-bit log fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
379
380void
381sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382{
383	struct sctp_cwnd_log sctp_clog;
384
385	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386	sctp_clog.x.rwnd.send_size = snd_size;
387	sctp_clog.x.rwnd.overhead = overhead;
388	sctp_clog.x.rwnd.new_rwnd = 0;
389	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390	    SCTP_LOG_EVENT_RWND,
391	    from,
392	    sctp_clog.x.misc.log1,
393	    sctp_clog.x.misc.log2,
394	    sctp_clog.x.misc.log3,
395	    sctp_clog.x.misc.log4);
396}
397
398void
399sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400{
401	struct sctp_cwnd_log sctp_clog;
402
403	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404	sctp_clog.x.rwnd.send_size = flight_size;
405	sctp_clog.x.rwnd.overhead = overhead;
406	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408	    SCTP_LOG_EVENT_RWND,
409	    from,
410	    sctp_clog.x.misc.log1,
411	    sctp_clog.x.misc.log2,
412	    sctp_clog.x.misc.log3,
413	    sctp_clog.x.misc.log4);
414}
415
416void
417sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418{
419	struct sctp_cwnd_log sctp_clog;
420
421	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422	sctp_clog.x.mbcnt.size_change = book;
423	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426	    SCTP_LOG_EVENT_MBCNT,
427	    from,
428	    sctp_clog.x.misc.log1,
429	    sctp_clog.x.misc.log2,
430	    sctp_clog.x.misc.log3,
431	    sctp_clog.x.misc.log4);
432}
433
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace event;
 * no sctp_cwnd_log staging is needed since the values go straight through.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442
443void
444sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445{
446	struct sctp_cwnd_log sctp_clog;
447
448	sctp_clog.x.wake.stcb = (void *)stcb;
449	sctp_clog.x.wake.wake_cnt = wake_cnt;
450	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453
454	if (stcb->asoc.stream_queue_cnt < 0xff)
455		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456	else
457		sctp_clog.x.wake.stream_qcnt = 0xff;
458
459	if (stcb->asoc.chunks_on_out_queue < 0xff)
460		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461	else
462		sctp_clog.x.wake.chunks_on_oque = 0xff;
463
464	sctp_clog.x.wake.sctpflags = 0;
465	/* set in the defered mode stuff */
466	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467		sctp_clog.x.wake.sctpflags |= 1;
468	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469		sctp_clog.x.wake.sctpflags |= 2;
470	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471		sctp_clog.x.wake.sctpflags |= 4;
472	/* what about the sb */
473	if (stcb->sctp_socket) {
474		struct socket *so = stcb->sctp_socket;
475
476		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477	} else {
478		sctp_clog.x.wake.sbflags = 0xff;
479	}
480	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481	    SCTP_LOG_EVENT_WAKE,
482	    from,
483	    sctp_clog.x.misc.log1,
484	    sctp_clog.x.misc.log2,
485	    sctp_clog.x.misc.log3,
486	    sctp_clog.x.misc.log4);
487}
488
489void
490sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491{
492	struct sctp_cwnd_log sctp_clog;
493
494	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500	sctp_clog.x.blk.sndlen = sendlen;
501	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502	    SCTP_LOG_EVENT_BLOCK,
503	    from,
504	    sctp_clog.x.misc.log1,
505	    sctp_clog.x.misc.log2,
506	    sctp_clog.x.misc.log3,
507	    sctp_clog.x.misc.log4);
508}
509
/*
 * Stub for the SCTP_GET_STAT_LOG socket option; trace data is retrieved
 * via ktrdump instead, so this always succeeds without copying anything.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516
517#ifdef SCTP_AUDITING_ENABLED
518uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
519static int sctp_audit_indx = 0;
520
521static
522void
523sctp_print_audit_report(void)
524{
525	int i;
526	int cnt;
527
528	cnt = 0;
529	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530		if ((sctp_audit_data[i][0] == 0xe0) &&
531		    (sctp_audit_data[i][1] == 0x01)) {
532			cnt = 0;
533			SCTP_PRINTF("\n");
534		} else if (sctp_audit_data[i][0] == 0xf0) {
535			cnt = 0;
536			SCTP_PRINTF("\n");
537		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538		    (sctp_audit_data[i][1] == 0x01)) {
539			SCTP_PRINTF("\n");
540			cnt = 0;
541		}
542		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543		    (uint32_t) sctp_audit_data[i][1]);
544		cnt++;
545		if ((cnt % 14) == 0)
546			SCTP_PRINTF("\n");
547	}
548	for (i = 0; i < sctp_audit_indx; i++) {
549		if ((sctp_audit_data[i][0] == 0xe0) &&
550		    (sctp_audit_data[i][1] == 0x01)) {
551			cnt = 0;
552			SCTP_PRINTF("\n");
553		} else if (sctp_audit_data[i][0] == 0xf0) {
554			cnt = 0;
555			SCTP_PRINTF("\n");
556		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557		    (sctp_audit_data[i][1] == 0x01)) {
558			SCTP_PRINTF("\n");
559			cnt = 0;
560		}
561		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562		    (uint32_t) sctp_audit_data[i][1]);
563		cnt++;
564		if ((cnt % 14) == 0)
565			SCTP_PRINTF("\n");
566	}
567	SCTP_PRINTF("\n");
568}
569
/*
 * Audit the association's retransmission and flight-size bookkeeping
 * against the actual sent queue, recording progress markers in the
 * circular audit buffer.  Discrepancies are printed, *corrected in place*
 * (the cached counters are overwritten with the recomputed values), and
 * trigger a full audit-buffer dump.  The net parameter is currently
 * unused; per-net flight sizes are audited for every net on the list.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit entry; the low byte identifies the caller. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: aborted, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: aborted, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: cached retransmit count at entry. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recompute retransmit count and flight size from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		/* Corrective action: trust the recomputed value. */
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total-flight (bytes) mismatch. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-net flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sum mismatch. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
699
700void
701sctp_audit_log(uint8_t ev, uint8_t fd)
702{
703
704	sctp_audit_data[sctp_audit_indx][0] = ev;
705	sctp_audit_data[sctp_audit_indx][1] = fd;
706	sctp_audit_indx++;
707	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708		sctp_audit_indx = 0;
709	}
710}
711
712#endif
713
714/*
715 * sctp_stop_timers_for_shutdown() should be called
716 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717 * state to make sure that all timers are stopped.
718 */
719void
720sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721{
722	struct sctp_association *asoc;
723	struct sctp_nets *net;
724
725	asoc = &stcb->asoc;
726
727	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735	}
736}
737
738/*
739 * a list of sizes based on typical mtu's, used only if next hop size not
740 * returned.
741 */
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.  Must remain sorted in ascending order: both
 * sctp_get_prev_mtu() and sctp_get_next_mtu() rely on that ordering.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
762
763/*
764 * Return the largest MTU smaller than val. If there is no
765 * entry, just return val.
766 */
767uint32_t
768sctp_get_prev_mtu(uint32_t val)
769{
770	uint32_t i;
771
772	if (val <= sctp_mtu_sizes[0]) {
773		return (val);
774	}
775	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
776		if (val <= sctp_mtu_sizes[i]) {
777			break;
778		}
779	}
780	return (sctp_mtu_sizes[i - 1]);
781}
782
783/*
784 * Return the smallest MTU larger than val. If there is no
785 * entry, just return val.
786 */
787uint32_t
788sctp_get_next_mtu(uint32_t val)
789{
790	/* select another MTU that is just bigger than this one */
791	uint32_t i;
792
793	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
794		if (val < sctp_mtu_sizes[i]) {
795			return (sctp_mtu_sizes[i]);
796		}
797	}
798	return (val);
799}
800
/*
 * Refill the endpoint's random store by HMACing the stored random numbers
 * with a monotonically increasing counter, then reset the read offset.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
819
820uint32_t
821sctp_select_initial_TSN(struct sctp_pcb *inp)
822{
823	/*
824	 * A true implementation should use random selection process to get
825	 * the initial stream sequence number, using RFC1750 as a good
826	 * guideline
827	 */
828	uint32_t x, *xp;
829	uint8_t *p;
830	int store_at, new_store;
831
832	if (inp->initial_sequence_debug != 0) {
833		uint32_t ret;
834
835		ret = inp->initial_sequence_debug;
836		inp->initial_sequence_debug++;
837		return (ret);
838	}
839retry:
840	store_at = inp->store_at;
841	new_store = store_at + sizeof(uint32_t);
842	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
843		new_store = 0;
844	}
845	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
846		goto retry;
847	}
848	if (new_store == 0) {
849		/* Refill the random store */
850		sctp_fill_random_store(inp);
851	}
852	p = &inp->random_store[store_at];
853	xp = (uint32_t *) p;
854	x = *xp;
855	return (x);
856}
857
858uint32_t
859sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860{
861	uint32_t x;
862	struct timeval now;
863
864	if (check) {
865		(void)SCTP_GETTIME_TIMEVAL(&now);
866	}
867	for (;;) {
868		x = sctp_select_initial_TSN(&inp->sctp_ep);
869		if (x == 0) {
870			/* we never use 0 */
871			continue;
872		}
873		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874			break;
875		}
876	}
877	return (x);
878}
879
880int
881sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
882    uint32_t override_tag, uint32_t vrf_id)
883{
884	struct sctp_association *asoc;
885
886	/*
887	 * Anything set to zero is taken care of by the allocation routine's
888	 * bzero
889	 */
890
891	/*
892	 * Up front select what scoping to apply on addresses I tell my peer
893	 * Not sure what to do with these right now, we will need to come up
894	 * with a way to set them. We may need to pass them through from the
895	 * caller in the sctp_aloc_assoc() function.
896	 */
897	int i;
898
899	asoc = &stcb->asoc;
900	/* init all variables to a known value. */
901	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902	asoc->max_burst = inp->sctp_ep.max_burst;
903	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
904	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
906	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
907	asoc->ecn_supported = inp->ecn_supported;
908	asoc->prsctp_supported = inp->prsctp_supported;
909	asoc->reconfig_supported = inp->reconfig_supported;
910	asoc->nrsack_supported = inp->nrsack_supported;
911	asoc->pktdrop_supported = inp->pktdrop_supported;
912	asoc->sctp_cmt_pf = (uint8_t) 0;
913	asoc->sctp_frag_point = inp->sctp_frag_point;
914	asoc->sctp_features = inp->sctp_features;
915	asoc->default_dscp = inp->sctp_ep.default_dscp;
916#ifdef INET6
917	if (inp->sctp_ep.default_flowlabel) {
918		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
919	} else {
920		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
921			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
922			asoc->default_flowlabel &= 0x000fffff;
923			asoc->default_flowlabel |= 0x80000000;
924		} else {
925			asoc->default_flowlabel = 0;
926		}
927	}
928#endif
929	asoc->sb_send_resv = 0;
930	if (override_tag) {
931		asoc->my_vtag = override_tag;
932	} else {
933		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
934	}
935	/* Get the nonce tags */
936	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
937	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
938	asoc->vrf_id = vrf_id;
939
940#ifdef SCTP_ASOCLOG_OF_TSNS
941	asoc->tsn_in_at = 0;
942	asoc->tsn_out_at = 0;
943	asoc->tsn_in_wrapped = 0;
944	asoc->tsn_out_wrapped = 0;
945	asoc->cumack_log_at = 0;
946	asoc->cumack_log_atsnt = 0;
947#endif
948#ifdef SCTP_FS_SPEC_LOG
949	asoc->fs_index = 0;
950#endif
951	asoc->refcnt = 0;
952	asoc->assoc_up_sent = 0;
953	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
954	    sctp_select_initial_TSN(&inp->sctp_ep);
955	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
956	/* we are optimisitic here */
957	asoc->peer_supports_nat = 0;
958	asoc->sent_queue_retran_cnt = 0;
959
960	/* for CMT */
961	asoc->last_net_cmt_send_started = NULL;
962
963	/* This will need to be adjusted */
964	asoc->last_acked_seq = asoc->init_seq_number - 1;
965	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
966	asoc->asconf_seq_in = asoc->last_acked_seq;
967
968	/* here we are different, we hold the next one we expect */
969	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
970
971	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
972	asoc->initial_rto = inp->sctp_ep.initial_rto;
973
974	asoc->max_init_times = inp->sctp_ep.max_init_times;
975	asoc->max_send_times = inp->sctp_ep.max_send_times;
976	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
977	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
978	asoc->free_chunk_cnt = 0;
979
980	asoc->iam_blocking = 0;
981	asoc->context = inp->sctp_context;
982	asoc->local_strreset_support = inp->local_strreset_support;
983	asoc->def_send = inp->def_send;
984	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
985	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
986	asoc->pr_sctp_cnt = 0;
987	asoc->total_output_queue_size = 0;
988
989	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
990		asoc->scope.ipv6_addr_legal = 1;
991		if (SCTP_IPV6_V6ONLY(inp) == 0) {
992			asoc->scope.ipv4_addr_legal = 1;
993		} else {
994			asoc->scope.ipv4_addr_legal = 0;
995		}
996	} else {
997		asoc->scope.ipv6_addr_legal = 0;
998		asoc->scope.ipv4_addr_legal = 1;
999	}
1000
1001	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1002	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1003
1004	asoc->smallest_mtu = inp->sctp_frag_point;
1005	asoc->minrto = inp->sctp_ep.sctp_minrto;
1006	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1007
1008	asoc->locked_on_sending = NULL;
1009	asoc->stream_locked_on = 0;
1010	asoc->ecn_echo_cnt_onq = 0;
1011	asoc->stream_locked = 0;
1012
1013	asoc->send_sack = 1;
1014
1015	LIST_INIT(&asoc->sctp_restricted_addrs);
1016
1017	TAILQ_INIT(&asoc->nets);
1018	TAILQ_INIT(&asoc->pending_reply_queue);
1019	TAILQ_INIT(&asoc->asconf_ack_sent);
1020	/* Setup to fill the hb random cache at first HB */
1021	asoc->hb_random_idx = 4;
1022
1023	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1024
1025	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1026	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1027
1028	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1029	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1030
1031	/*
1032	 * Now the stream parameters, here we allocate space for all streams
1033	 * that we request by default.
1034	 */
1035	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1036	    inp->sctp_ep.pre_open_stream_count;
1037	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1038	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1039	    SCTP_M_STRMO);
1040	if (asoc->strmout == NULL) {
1041		/* big trouble no memory */
1042		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1043		return (ENOMEM);
1044	}
1045	for (i = 0; i < asoc->streamoutcnt; i++) {
1046		/*
1047		 * inbound side must be set to 0xffff, also NOTE when we get
1048		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1049		 * count (streamoutcnt) but first check if we sent to any of
1050		 * the upper streams that were dropped (if some were). Those
1051		 * that were dropped must be notified to the upper layer as
1052		 * failed to send.
1053		 */
1054		asoc->strmout[i].next_sequence_send = 0x0;
1055		TAILQ_INIT(&asoc->strmout[i].outqueue);
1056		asoc->strmout[i].chunks_on_queues = 0;
1057		asoc->strmout[i].stream_no = i;
1058		asoc->strmout[i].last_msg_incomplete = 0;
1059		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1060	}
1061	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1062
1063	/* Now the mapping array */
1064	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1065	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1066	    SCTP_M_MAP);
1067	if (asoc->mapping_array == NULL) {
1068		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1069		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1070		return (ENOMEM);
1071	}
1072	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1073	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1074	    SCTP_M_MAP);
1075	if (asoc->nr_mapping_array == NULL) {
1076		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1077		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1078		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1079		return (ENOMEM);
1080	}
1081	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1082
1083	/* Now the init of the other outqueues */
1084	TAILQ_INIT(&asoc->free_chunks);
1085	TAILQ_INIT(&asoc->control_send_queue);
1086	TAILQ_INIT(&asoc->asconf_send_queue);
1087	TAILQ_INIT(&asoc->send_queue);
1088	TAILQ_INIT(&asoc->sent_queue);
1089	TAILQ_INIT(&asoc->reasmqueue);
1090	TAILQ_INIT(&asoc->resetHead);
1091	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1092	TAILQ_INIT(&asoc->asconf_queue);
1093	/* authentication fields */
1094	asoc->authinfo.random = NULL;
1095	asoc->authinfo.active_keyid = 0;
1096	asoc->authinfo.assoc_key = NULL;
1097	asoc->authinfo.assoc_keyid = 0;
1098	asoc->authinfo.recv_key = NULL;
1099	asoc->authinfo.recv_keyid = 0;
1100	LIST_INIT(&asoc->shared_keys);
1101	asoc->marked_retrans = 0;
1102	asoc->port = inp->sctp_ep.port;
1103	asoc->timoinit = 0;
1104	asoc->timodata = 0;
1105	asoc->timosack = 0;
1106	asoc->timoshutdown = 0;
1107	asoc->timoheartbeat = 0;
1108	asoc->timocookie = 0;
1109	asoc->timoshutdownack = 0;
1110	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1111	asoc->discontinuity_time = asoc->start_time;
1112	/*
1113	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1114	 * freed later when the association is freed.
1115	 */
1116	return (0);
1117}
1118
1119void
1120sctp_print_mapping_array(struct sctp_association *asoc)
1121{
1122	unsigned int i, limit;
1123
1124	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1125	    asoc->mapping_array_size,
1126	    asoc->mapping_array_base_tsn,
1127	    asoc->cumulative_tsn,
1128	    asoc->highest_tsn_inside_map,
1129	    asoc->highest_tsn_inside_nr_map);
1130	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1131		if (asoc->mapping_array[limit - 1] != 0) {
1132			break;
1133		}
1134	}
1135	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1136	for (i = 0; i < limit; i++) {
1137		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1138	}
1139	if (limit % 16)
1140		SCTP_PRINTF("\n");
1141	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1142		if (asoc->nr_mapping_array[limit - 1]) {
1143			break;
1144		}
1145	}
1146	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1147	for (i = 0; i < limit; i++) {
1148		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1149	}
1150	if (limit % 16)
1151		SCTP_PRINTF("\n");
1152}
1153
1154int
1155sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1156{
1157	/* mapping array needs to grow */
1158	uint8_t *new_array1, *new_array2;
1159	uint32_t new_size;
1160
1161	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1162	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1163	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1164	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1165		/* can't get more, forget it */
1166		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1167		if (new_array1) {
1168			SCTP_FREE(new_array1, SCTP_M_MAP);
1169		}
1170		if (new_array2) {
1171			SCTP_FREE(new_array2, SCTP_M_MAP);
1172		}
1173		return (-1);
1174	}
1175	memset(new_array1, 0, new_size);
1176	memset(new_array2, 0, new_size);
1177	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1178	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1179	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1180	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1181	asoc->mapping_array = new_array1;
1182	asoc->nr_mapping_array = new_array2;
1183	asoc->mapping_array_size = new_size;
1184	return (0);
1185}
1186
1187
/*
 * Core of the association iterator: walk the endpoints (or a single one,
 * when SCTP_ITERATOR_DO_SINGLE_INP is set) whose pcb flags/features match
 * the iterator's criteria, and run the caller-supplied callbacks on each
 * matching association.  Holds the INP-info read lock and the iterator
 * lock for the duration, but periodically drops and re-takes them so other
 * threads can make progress.  Frees "it" when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Convert the caller's reference into our own read lock. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On re-entry from "no_stcb" the inp is not yet locked. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no assocs: finish inp. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While the locks were dropped, someone may have
			 * asked this iterator to stop via the control flags.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire our locks and drop the pause references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1335
1336void
1337sctp_iterator_worker(void)
1338{
1339	struct sctp_iterator *it, *nit;
1340
1341	/* This function is called with the WQ lock in place */
1342
1343	sctp_it_ctl.iterator_running = 1;
1344	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1345		sctp_it_ctl.cur_it = it;
1346		/* now lets work on this one */
1347		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1348		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1349		CURVNET_SET(it->vn);
1350		sctp_iterator_work(it);
1351		sctp_it_ctl.cur_it = NULL;
1352		CURVNET_RESTORE();
1353		SCTP_IPI_ITERATOR_WQ_LOCK();
1354		/* sa_ignore FREED_MEMORY */
1355	}
1356	sctp_it_ctl.iterator_running = 0;
1357	return;
1358}
1359
1360
1361static void
1362sctp_handle_addr_wq(void)
1363{
1364	/* deal with the ADDR wq from the rtsock calls */
1365	struct sctp_laddr *wi, *nwi;
1366	struct sctp_asconf_iterator *asc;
1367
1368	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1369	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1370	if (asc == NULL) {
1371		/* Try later, no memory */
1372		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1373		    (struct sctp_inpcb *)NULL,
1374		    (struct sctp_tcb *)NULL,
1375		    (struct sctp_nets *)NULL);
1376		return;
1377	}
1378	LIST_INIT(&asc->list_of_work);
1379	asc->cnt = 0;
1380
1381	SCTP_WQ_ADDR_LOCK();
1382	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1383		LIST_REMOVE(wi, sctp_nxt_addr);
1384		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1385		asc->cnt++;
1386	}
1387	SCTP_WQ_ADDR_UNLOCK();
1388
1389	if (asc->cnt == 0) {
1390		SCTP_FREE(asc, SCTP_M_ASC_IT);
1391	} else {
1392		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1393		    sctp_asconf_iterator_stcb,
1394		    NULL,	/* No ep end for boundall */
1395		    SCTP_PCB_FLAGS_BOUNDALL,
1396		    SCTP_PCB_ANY_FEATURES,
1397		    SCTP_ASOC_ANY_STATE,
1398		    (void *)asc, 0,
1399		    sctp_asconf_iterator_end, NULL, 0);
1400	}
1401}
1402
/*
 * Common callout handler for every SCTP timer type.  Runs in callout
 * (softclock) context.  It first validates the timer (stale pointer,
 * invalid type, vanished endpoint/association), takes references and the
 * TCB lock as needed, then dispatches to the per-type timer routine via
 * the big switch.  The goto tiers at the bottom undo exactly what was
 * acquired: "get_out" unlocks the TCB, "out_decr" additionally drops the
 * inp reference, "out_no_decr" is for paths that already released (or
 * destroyed) everything themselves.  tmr->stopped_from is breadcrumbed at
 * each early-exit point for debugging.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the address work-queue timer may run without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		/*
		 * Hold the endpoint; if its socket is already gone, only
		 * the timer types listed below are still meaningful.
		 */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association; state 0 means it is being torn down. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the TCB lock, then drop the temporary refcount; bail
		 * if the assoc died while we were acquiring the lock (the
		 * ASOCKILL timer is exempt -- it must still run).
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* Re-arm HB only when HBs are enabled on this path. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Guard expired: abort the association outright. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1842
1843void
1844sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1845    struct sctp_nets *net)
1846{
1847	uint32_t to_ticks;
1848	struct sctp_timer *tmr;
1849
1850	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1851		return;
1852
1853	tmr = NULL;
1854	if (stcb) {
1855		SCTP_TCB_LOCK_ASSERT(stcb);
1856	}
1857	switch (t_type) {
1858	case SCTP_TIMER_TYPE_ZERO_COPY:
1859		tmr = &inp->sctp_ep.zero_copy_timer;
1860		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1861		break;
1862	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1863		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1864		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1865		break;
1866	case SCTP_TIMER_TYPE_ADDR_WQ:
1867		/* Only 1 tick away :-) */
1868		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1869		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1870		break;
1871	case SCTP_TIMER_TYPE_SEND:
1872		/* Here we use the RTO timer */
1873		{
1874			int rto_val;
1875
1876			if ((stcb == NULL) || (net == NULL)) {
1877				return;
1878			}
1879			tmr = &net->rxt_timer;
1880			if (net->RTO == 0) {
1881				rto_val = stcb->asoc.initial_rto;
1882			} else {
1883				rto_val = net->RTO;
1884			}
1885			to_ticks = MSEC_TO_TICKS(rto_val);
1886		}
1887		break;
1888	case SCTP_TIMER_TYPE_INIT:
1889		/*
1890		 * Here we use the INIT timer default usually about 1
1891		 * minute.
1892		 */
1893		if ((stcb == NULL) || (net == NULL)) {
1894			return;
1895		}
1896		tmr = &net->rxt_timer;
1897		if (net->RTO == 0) {
1898			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1899		} else {
1900			to_ticks = MSEC_TO_TICKS(net->RTO);
1901		}
1902		break;
1903	case SCTP_TIMER_TYPE_RECV:
1904		/*
1905		 * Here we use the Delayed-Ack timer value from the inp
1906		 * ususually about 200ms.
1907		 */
1908		if (stcb == NULL) {
1909			return;
1910		}
1911		tmr = &stcb->asoc.dack_timer;
1912		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1913		break;
1914	case SCTP_TIMER_TYPE_SHUTDOWN:
1915		/* Here we use the RTO of the destination. */
1916		if ((stcb == NULL) || (net == NULL)) {
1917			return;
1918		}
1919		if (net->RTO == 0) {
1920			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1921		} else {
1922			to_ticks = MSEC_TO_TICKS(net->RTO);
1923		}
1924		tmr = &net->rxt_timer;
1925		break;
1926	case SCTP_TIMER_TYPE_HEARTBEAT:
1927		/*
1928		 * the net is used here so that we can add in the RTO. Even
1929		 * though we use a different timer. We also add the HB timer
1930		 * PLUS a random jitter.
1931		 */
1932		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1933			return;
1934		} else {
1935			uint32_t rndval;
1936			uint32_t jitter;
1937
1938			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1939			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1940				return;
1941			}
1942			if (net->RTO == 0) {
1943				to_ticks = stcb->asoc.initial_rto;
1944			} else {
1945				to_ticks = net->RTO;
1946			}
1947			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1948			jitter = rndval % to_ticks;
1949			if (jitter >= (to_ticks >> 1)) {
1950				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1951			} else {
1952				to_ticks = to_ticks - jitter;
1953			}
1954			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1955			    !(net->dest_state & SCTP_ADDR_PF)) {
1956				to_ticks += net->heart_beat_delay;
1957			}
1958			/*
1959			 * Now we must convert the to_ticks that are now in
1960			 * ms to ticks.
1961			 */
1962			to_ticks = MSEC_TO_TICKS(to_ticks);
1963			tmr = &net->hb_timer;
1964		}
1965		break;
1966	case SCTP_TIMER_TYPE_COOKIE:
1967		/*
1968		 * Here we can use the RTO timer from the network since one
1969		 * RTT was compelete. If a retran happened then we will be
1970		 * using the RTO initial value.
1971		 */
1972		if ((stcb == NULL) || (net == NULL)) {
1973			return;
1974		}
1975		if (net->RTO == 0) {
1976			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1977		} else {
1978			to_ticks = MSEC_TO_TICKS(net->RTO);
1979		}
1980		tmr = &net->rxt_timer;
1981		break;
1982	case SCTP_TIMER_TYPE_NEWCOOKIE:
1983		/*
1984		 * nothing needed but the endpoint here ususually about 60
1985		 * minutes.
1986		 */
1987		if (inp == NULL) {
1988			return;
1989		}
1990		tmr = &inp->sctp_ep.signature_change;
1991		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1992		break;
1993	case SCTP_TIMER_TYPE_ASOCKILL:
1994		if (stcb == NULL) {
1995			return;
1996		}
1997		tmr = &stcb->asoc.strreset_timer;
1998		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1999		break;
2000	case SCTP_TIMER_TYPE_INPKILL:
2001		/*
2002		 * The inp is setup to die. We re-use the signature_chage
2003		 * timer since that has stopped and we are in the GONE
2004		 * state.
2005		 */
2006		if (inp == NULL) {
2007			return;
2008		}
2009		tmr = &inp->sctp_ep.signature_change;
2010		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2011		break;
2012	case SCTP_TIMER_TYPE_PATHMTURAISE:
2013		/*
2014		 * Here we use the value found in the EP for PMTU ususually
2015		 * about 10 minutes.
2016		 */
2017		if ((stcb == NULL) || (inp == NULL)) {
2018			return;
2019		}
2020		if (net == NULL) {
2021			return;
2022		}
2023		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2024			return;
2025		}
2026		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2027		tmr = &net->pmtu_timer;
2028		break;
2029	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2030		/* Here we use the RTO of the destination */
2031		if ((stcb == NULL) || (net == NULL)) {
2032			return;
2033		}
2034		if (net->RTO == 0) {
2035			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2036		} else {
2037			to_ticks = MSEC_TO_TICKS(net->RTO);
2038		}
2039		tmr = &net->rxt_timer;
2040		break;
2041	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2042		/*
2043		 * Here we use the endpoints shutdown guard timer usually
2044		 * about 3 minutes.
2045		 */
2046		if ((inp == NULL) || (stcb == NULL)) {
2047			return;
2048		}
2049		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2050		tmr = &stcb->asoc.shut_guard_timer;
2051		break;
2052	case SCTP_TIMER_TYPE_STRRESET:
2053		/*
2054		 * Here the timer comes from the stcb but its value is from
2055		 * the net's RTO.
2056		 */
2057		if ((stcb == NULL) || (net == NULL)) {
2058			return;
2059		}
2060		if (net->RTO == 0) {
2061			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2062		} else {
2063			to_ticks = MSEC_TO_TICKS(net->RTO);
2064		}
2065		tmr = &stcb->asoc.strreset_timer;
2066		break;
2067	case SCTP_TIMER_TYPE_ASCONF:
2068		/*
2069		 * Here the timer comes from the stcb but its value is from
2070		 * the net's RTO.
2071		 */
2072		if ((stcb == NULL) || (net == NULL)) {
2073			return;
2074		}
2075		if (net->RTO == 0) {
2076			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2077		} else {
2078			to_ticks = MSEC_TO_TICKS(net->RTO);
2079		}
2080		tmr = &stcb->asoc.asconf_timer;
2081		break;
2082	case SCTP_TIMER_TYPE_PRIM_DELETED:
2083		if ((stcb == NULL) || (net != NULL)) {
2084			return;
2085		}
2086		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2087		tmr = &stcb->asoc.delete_prim_timer;
2088		break;
2089	case SCTP_TIMER_TYPE_AUTOCLOSE:
2090		if (stcb == NULL) {
2091			return;
2092		}
2093		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2094			/*
2095			 * Really an error since stcb is NOT set to
2096			 * autoclose
2097			 */
2098			return;
2099		}
2100		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2101		tmr = &stcb->asoc.autoclose_timer;
2102		break;
2103	default:
2104		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2105		    __FUNCTION__, t_type);
2106		return;
2107		break;
2108	}
2109	if ((to_ticks <= 0) || (tmr == NULL)) {
2110		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2111		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2112		return;
2113	}
2114	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2115		/*
2116		 * we do NOT allow you to have it already running. if it is
2117		 * we leave the current one up unchanged
2118		 */
2119		return;
2120	}
2121	/* At this point we can proceed */
2122	if (t_type == SCTP_TIMER_TYPE_SEND) {
2123		stcb->asoc.num_send_timers_up++;
2124	}
2125	tmr->stopped_from = 0;
2126	tmr->type = t_type;
2127	tmr->ep = (void *)inp;
2128	tmr->tcb = (void *)stcb;
2129	tmr->net = (void *)net;
2130	tmr->self = (void *)tmr;
2131	tmr->vnet = (void *)curvnet;
2132	tmr->ticks = sctp_get_tick_count();
2133	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2134	return;
2135}
2136
/*
 * Stop (cancel) a pending SCTP timer of the given type.
 *
 * t_type selects which timer to cancel; depending on the type, inp,
 * stcb and/or net must be non-NULL (the per-type checks below return
 * silently if a required argument is missing).  'from' identifies the
 * caller location and is recorded in tmr->stopped_from for debugging.
 *
 * If the underlying sctp_timer is currently in joint use by a
 * different timer type (tmr->type mismatch), the timer is left
 * running and this call is a no-op.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/*
	 * Every timer type except the address work-queue timer hangs off
	 * an endpoint, so inp is required for all the others.
	 */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer instance that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.  Note this shares the
		 * strreset_timer slot with SCTP_TIMER_TYPE_STRRESET; the
		 * tmr->type check after the switch guards against
		 * stopping the wrong one.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the count of outstanding T3-send timers in sync. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2293
2294uint32_t
2295sctp_calculate_len(struct mbuf *m)
2296{
2297	uint32_t tlen = 0;
2298	struct mbuf *at;
2299
2300	at = m;
2301	while (at) {
2302		tlen += SCTP_BUF_LEN(at);
2303		at = SCTP_BUF_NEXT(at);
2304	}
2305	return (tlen);
2306}
2307
2308void
2309sctp_mtu_size_reset(struct sctp_inpcb *inp,
2310    struct sctp_association *asoc, uint32_t mtu)
2311{
2312	/*
2313	 * Reset the P-MTU size on this association, this involves changing
2314	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2315	 * allow the DF flag to be cleared.
2316	 */
2317	struct sctp_tmit_chunk *chk;
2318	unsigned int eff_mtu, ovh;
2319
2320	asoc->smallest_mtu = mtu;
2321	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2322		ovh = SCTP_MIN_OVERHEAD;
2323	} else {
2324		ovh = SCTP_MIN_V4_OVERHEAD;
2325	}
2326	eff_mtu = mtu - ovh;
2327	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2328		if (chk->send_size > eff_mtu) {
2329			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2330		}
2331	}
2332	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2333		if (chk->send_size > eff_mtu) {
2334			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2335		}
2336	}
2337}
2338
2339
2340/*
2341 * given an association and starting time of the current RTT period return
2342 * RTO in number of msecs net should point to the current network
2343 */
2344
2345uint32_t
2346sctp_calculate_rto(struct sctp_tcb *stcb,
2347    struct sctp_association *asoc,
2348    struct sctp_nets *net,
2349    struct timeval *told,
2350    int safe, int rtt_from_sack)
2351{
2352	/*-
2353	 * given an association and the starting time of the current RTT
2354	 * period (in value1/value2) return RTO in number of msecs.
2355	 */
2356	int32_t rtt;		/* RTT in ms */
2357	uint32_t new_rto;
2358	int first_measure = 0;
2359	struct timeval now, then, *old;
2360
2361	/* Copy it out for sparc64 */
2362	if (safe == sctp_align_unsafe_makecopy) {
2363		old = &then;
2364		memcpy(&then, told, sizeof(struct timeval));
2365	} else if (safe == sctp_align_safe_nocopy) {
2366		old = told;
2367	} else {
2368		/* error */
2369		SCTP_PRINTF("Huh, bad rto calc call\n");
2370		return (0);
2371	}
2372	/************************/
2373	/* 1. calculate new RTT */
2374	/************************/
2375	/* get the current time */
2376	if (stcb->asoc.use_precise_time) {
2377		(void)SCTP_GETPTIME_TIMEVAL(&now);
2378	} else {
2379		(void)SCTP_GETTIME_TIMEVAL(&now);
2380	}
2381	timevalsub(&now, old);
2382	/* store the current RTT in us */
2383	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
2384	        (uint64_t) now.tv_usec;
2385
2386	/* computer rtt in ms */
2387	rtt = net->rtt / 1000;
2388	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2389		/*
2390		 * Tell the CC module that a new update has just occurred
2391		 * from a sack
2392		 */
2393		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2394	}
2395	/*
2396	 * Do we need to determine the lan? We do this only on sacks i.e.
2397	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2398	 */
2399	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2400	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2401		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2402			net->lan_type = SCTP_LAN_INTERNET;
2403		} else {
2404			net->lan_type = SCTP_LAN_LOCAL;
2405		}
2406	}
2407	/***************************/
2408	/* 2. update RTTVAR & SRTT */
2409	/***************************/
2410	/*-
2411	 * Compute the scaled average lastsa and the
2412	 * scaled variance lastsv as described in van Jacobson
2413	 * Paper "Congestion Avoidance and Control", Annex A.
2414	 *
2415	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2416	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2417	 */
2418	if (net->RTO_measured) {
2419		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2420		net->lastsa += rtt;
2421		if (rtt < 0) {
2422			rtt = -rtt;
2423		}
2424		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2425		net->lastsv += rtt;
2426		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2427			rto_logging(net, SCTP_LOG_RTTVAR);
2428		}
2429	} else {
2430		/* First RTO measurment */
2431		net->RTO_measured = 1;
2432		first_measure = 1;
2433		net->lastsa = rtt << SCTP_RTT_SHIFT;
2434		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2435		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2436			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2437		}
2438	}
2439	if (net->lastsv == 0) {
2440		net->lastsv = SCTP_CLOCK_GRANULARITY;
2441	}
2442	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2443	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2444	    (stcb->asoc.sat_network_lockout == 0)) {
2445		stcb->asoc.sat_network = 1;
2446	} else if ((!first_measure) && stcb->asoc.sat_network) {
2447		stcb->asoc.sat_network = 0;
2448		stcb->asoc.sat_network_lockout = 1;
2449	}
2450	/* bound it, per C6/C7 in Section 5.3.1 */
2451	if (new_rto < stcb->asoc.minrto) {
2452		new_rto = stcb->asoc.minrto;
2453	}
2454	if (new_rto > stcb->asoc.maxrto) {
2455		new_rto = stcb->asoc.maxrto;
2456	}
2457	/* we are now returning the RTO */
2458	return (new_rto);
2459}
2460
2461/*
2462 * return a pointer to a contiguous piece of data from the given mbuf chain
2463 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2464 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2465 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2466 */
2467caddr_t
2468sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2469{
2470	uint32_t count;
2471	uint8_t *ptr;
2472
2473	ptr = in_ptr;
2474	if ((off < 0) || (len <= 0))
2475		return (NULL);
2476
2477	/* find the desired start location */
2478	while ((m != NULL) && (off > 0)) {
2479		if (off < SCTP_BUF_LEN(m))
2480			break;
2481		off -= SCTP_BUF_LEN(m);
2482		m = SCTP_BUF_NEXT(m);
2483	}
2484	if (m == NULL)
2485		return (NULL);
2486
2487	/* is the current mbuf large enough (eg. contiguous)? */
2488	if ((SCTP_BUF_LEN(m) - off) >= len) {
2489		return (mtod(m, caddr_t)+off);
2490	} else {
2491		/* else, it spans more than one mbuf, so save a temp copy... */
2492		while ((m != NULL) && (len > 0)) {
2493			count = min(SCTP_BUF_LEN(m) - off, len);
2494			bcopy(mtod(m, caddr_t)+off, ptr, count);
2495			len -= count;
2496			ptr += count;
2497			off = 0;
2498			m = SCTP_BUF_NEXT(m);
2499		}
2500		if ((m == NULL) && (len > 0))
2501			return (NULL);
2502		else
2503			return ((caddr_t)in_ptr);
2504	}
2505}
2506
2507
2508
2509struct sctp_paramhdr *
2510sctp_get_next_param(struct mbuf *m,
2511    int offset,
2512    struct sctp_paramhdr *pull,
2513    int pull_limit)
2514{
2515	/* This just provides a typed signature to Peter's Pull routine */
2516	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2517	    (uint8_t *) pull));
2518}
2519
2520
2521struct mbuf *
2522sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2523{
2524	struct mbuf *m_last;
2525	caddr_t dp;
2526
2527	if (padlen > 3) {
2528		return (NULL);
2529	}
2530	if (padlen <= M_TRAILINGSPACE(m)) {
2531		/*
2532		 * The easy way. We hope the majority of the time we hit
2533		 * here :)
2534		 */
2535		m_last = m;
2536	} else {
2537		/* Hard way we must grow the mbuf chain */
2538		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2539		if (m_last == NULL) {
2540			return (NULL);
2541		}
2542		SCTP_BUF_LEN(m_last) = 0;
2543		SCTP_BUF_NEXT(m_last) = NULL;
2544		SCTP_BUF_NEXT(m) = m_last;
2545	}
2546	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2547	SCTP_BUF_LEN(m_last) += padlen;
2548	memset(dp, 0, padlen);
2549	return (m_last);
2550}
2551
2552struct mbuf *
2553sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2554{
2555	/* find the last mbuf in chain and pad it */
2556	struct mbuf *m_at;
2557
2558	if (last_mbuf != NULL) {
2559		return (sctp_add_pad_tombuf(last_mbuf, padval));
2560	} else {
2561		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2562			if (SCTP_BUF_NEXT(m_at) == NULL) {
2563				return (sctp_add_pad_tombuf(m_at, padval));
2564			}
2565		}
2566	}
2567	return (NULL);
2568}
2569
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for the given new state
 * (SCTP_COMM_UP, SCTP_COMM_LOST, SCTP_RESTART, SCTP_CANT_STR_ASSOC, ...)
 * if the application enabled SCTP_PCB_FLAGS_RECVASSOCEVNT.  For COMM_UP
 * and RESTART the supported-features list is appended; for COMM_LOST and
 * CANT_STR_ASSOC the peer's ABORT chunk (if any) is appended.  For
 * 1-to-1 style sockets in a failure state, also sets so_error
 * (ECONNREFUSED/ECONNRESET if peer-initiated, ETIMEDOUT/ECONNABORTED
 * otherwise) and wakes any sleepers.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Size the notification for the optional trailing info. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value (drop the trailing info). */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Append the list of features the peer supports. */
				i = 0;
				if (stcb->asoc.prsctp_supported) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.peer_supports_auth) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.peer_supports_asconf) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the raw ABORT chunk we received/sent. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		/*
		 * NOTE(review): SOCK_LOCK taken here appears to be released
		 * later via socantrcvmore_locked() under the same condition
		 * — confirm against the socket-layer contract.
		 */
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the unlock/relock dance for the so lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2717
2718static void
2719sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2720    struct sockaddr *sa, uint32_t error)
2721{
2722	struct mbuf *m_notify;
2723	struct sctp_paddr_change *spc;
2724	struct sctp_queued_to_read *control;
2725
2726	if ((stcb == NULL) ||
2727	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2728		/* event not enabled */
2729		return;
2730	}
2731	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2732	if (m_notify == NULL)
2733		return;
2734	SCTP_BUF_LEN(m_notify) = 0;
2735	spc = mtod(m_notify, struct sctp_paddr_change *);
2736	memset(spc, 0, sizeof(struct sctp_paddr_change));
2737	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2738	spc->spc_flags = 0;
2739	spc->spc_length = sizeof(struct sctp_paddr_change);
2740	switch (sa->sa_family) {
2741#ifdef INET
2742	case AF_INET:
2743		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2744		break;
2745#endif
2746#ifdef INET6
2747	case AF_INET6:
2748		{
2749			struct sockaddr_in6 *sin6;
2750
2751			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2752
2753			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2754			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2755				if (sin6->sin6_scope_id == 0) {
2756					/* recover scope_id for user */
2757					(void)sa6_recoverscope(sin6);
2758				} else {
2759					/* clear embedded scope_id for user */
2760					in6_clearscope(&sin6->sin6_addr);
2761				}
2762			}
2763			break;
2764		}
2765#endif
2766	default:
2767		/* TSNH */
2768		break;
2769	}
2770	spc->spc_state = state;
2771	spc->spc_error = error;
2772	spc->spc_assoc_id = sctp_get_associd(stcb);
2773
2774	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2775	SCTP_BUF_NEXT(m_notify) = NULL;
2776
2777	/* append to socket */
2778	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2779	    0, 0, stcb->asoc.context, 0, 0, 0,
2780	    m_notify);
2781	if (control == NULL) {
2782		/* no memory */
2783		sctp_m_freem(m_notify);
2784		return;
2785	}
2786	control->length = SCTP_BUF_LEN(m_notify);
2787	control->spec_flags = M_NOTIFICATION;
2788	/* not that we need this */
2789	control->tail_mbuf = m_notify;
2790	sctp_add_to_readq(stcb->sctp_ep, stcb,
2791	    control,
2792	    &stcb->sctp_socket->so_rcv, 1,
2793	    SCTP_READ_LOCK_NOT_HELD,
2794	    SCTP_SO_NOT_LOCKED);
2795}
2796
2797
/*
 * Deliver an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT (new
 * API) notification for a chunk that could not be (fully) delivered.
 * 'sent' distinguishes SCTP_DATA_SENT from SCTP_DATA_UNSENT.  The
 * chunk's user data is stolen (chk->data is set to NULL) and chained
 * behind the notification so the application gets the payload back.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence when both are enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/*
		 * Reported length = notification header + user payload
		 * (chunk size minus the DATA chunk header).
		 * NOTE(review): this subtraction is unconditional while the
		 * m_adj trim below is guarded by send_size >= header size —
		 * presumably send_size always covers the header here; confirm.
		 */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		/* Same length computation as the new-style branch above. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2911
2912
/*
 * Like sctp_notify_send_failed(), but for a stream-queue pending item
 * (data that never made it into a DATA chunk, hence always
 * SCTP_DATA_UNSENT).  The pending data is stolen (sp->data set to
 * NULL) and chained behind the notification for return to the user.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence when both are enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Reported length = notification header + pending user data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already chunked and sent */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3013
3014
3015
3016static void
3017sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3018{
3019	struct mbuf *m_notify;
3020	struct sctp_adaptation_event *sai;
3021	struct sctp_queued_to_read *control;
3022
3023	if ((stcb == NULL) ||
3024	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3025		/* event not enabled */
3026		return;
3027	}
3028	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3029	if (m_notify == NULL)
3030		/* no space left */
3031		return;
3032	SCTP_BUF_LEN(m_notify) = 0;
3033	sai = mtod(m_notify, struct sctp_adaptation_event *);
3034	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3035	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3036	sai->sai_flags = 0;
3037	sai->sai_length = sizeof(struct sctp_adaptation_event);
3038	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3039	sai->sai_assoc_id = sctp_get_associd(stcb);
3040
3041	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3042	SCTP_BUF_NEXT(m_notify) = NULL;
3043
3044	/* append to socket */
3045	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3046	    0, 0, stcb->asoc.context, 0, 0, 0,
3047	    m_notify);
3048	if (control == NULL) {
3049		/* no memory */
3050		sctp_m_freem(m_notify);
3051		return;
3052	}
3053	control->length = SCTP_BUF_LEN(m_notify);
3054	control->spec_flags = M_NOTIFICATION;
3055	/* not that we need this */
3056	control->tail_mbuf = m_notify;
3057	sctp_add_to_readq(stcb->sctp_ep, stcb,
3058	    control,
3059	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3060}
3061
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification. 'error' becomes the
 * pdapi_indication; 'val' packs the stream id in its upper 16 bits and the
 * sequence number in its lower 16 bits. The entry is inserted into the
 * read queue by hand (not via sctp_add_to_readq), directly after the
 * partially delivered message when one is pending.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* the application can no longer read from this socket */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack stream (high 16 bits) and sequence (low 16 bits) from val */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/* length is zeroed here and rebuilt by the atomic_add_int below */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/*
	 * Keep ordering for the reader: place the event right after the
	 * message being partially delivered, if there is one.
	 */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Take the socket lock; hold a refcount across the
			 * TCB unlock/relock so the assoc cannot go away.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3157
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification. For 1-to-1 style (TCP model)
 * and connected 1-to-many sockets, additionally mark the socket as unable
 * to send and wake up any writer.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Acquire the socket lock; hold a refcount across the TCB
		 * unlock/relock so the association cannot be freed meanwhile.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3226
3227static void
3228sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3229    int so_locked
3230#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3231    SCTP_UNUSED
3232#endif
3233)
3234{
3235	struct mbuf *m_notify;
3236	struct sctp_sender_dry_event *event;
3237	struct sctp_queued_to_read *control;
3238
3239	if ((stcb == NULL) ||
3240	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3241		/* event not enabled */
3242		return;
3243	}
3244	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3245	if (m_notify == NULL) {
3246		/* no space left */
3247		return;
3248	}
3249	SCTP_BUF_LEN(m_notify) = 0;
3250	event = mtod(m_notify, struct sctp_sender_dry_event *);
3251	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3252	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3253	event->sender_dry_flags = 0;
3254	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3255	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3256
3257	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3258	SCTP_BUF_NEXT(m_notify) = NULL;
3259
3260	/* append to socket */
3261	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3262	    0, 0, stcb->asoc.context, 0, 0, 0,
3263	    m_notify);
3264	if (control == NULL) {
3265		/* no memory */
3266		sctp_m_freem(m_notify);
3267		return;
3268	}
3269	control->length = SCTP_BUF_LEN(m_notify);
3270	control->spec_flags = M_NOTIFICATION;
3271	/* not that we need this */
3272	control->tail_mbuf = m_notify;
3273	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3274	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3275}
3276
3277
3278void
3279sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3280{
3281	struct mbuf *m_notify;
3282	struct sctp_queued_to_read *control;
3283	struct sctp_stream_change_event *stradd;
3284
3285	if ((stcb == NULL) ||
3286	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3287		/* event not enabled */
3288		return;
3289	}
3290	if ((stcb->asoc.peer_req_out) && flag) {
3291		/* Peer made the request, don't tell the local user */
3292		stcb->asoc.peer_req_out = 0;
3293		return;
3294	}
3295	stcb->asoc.peer_req_out = 0;
3296	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3297	if (m_notify == NULL)
3298		/* no space left */
3299		return;
3300	SCTP_BUF_LEN(m_notify) = 0;
3301	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3302	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3303	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3304	stradd->strchange_flags = flag;
3305	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3306	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3307	stradd->strchange_instrms = numberin;
3308	stradd->strchange_outstrms = numberout;
3309	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3310	SCTP_BUF_NEXT(m_notify) = NULL;
3311	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3312		/* no space */
3313		sctp_m_freem(m_notify);
3314		return;
3315	}
3316	/* append to socket */
3317	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3318	    0, 0, stcb->asoc.context, 0, 0, 0,
3319	    m_notify);
3320	if (control == NULL) {
3321		/* no memory */
3322		sctp_m_freem(m_notify);
3323		return;
3324	}
3325	control->spec_flags = M_NOTIFICATION;
3326	control->length = SCTP_BUF_LEN(m_notify);
3327	/* not that we need this */
3328	control->tail_mbuf = m_notify;
3329	sctp_add_to_readq(stcb->sctp_ep, stcb,
3330	    control,
3331	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3332}
3333
3334void
3335sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3336{
3337	struct mbuf *m_notify;
3338	struct sctp_queued_to_read *control;
3339	struct sctp_assoc_reset_event *strasoc;
3340
3341	if ((stcb == NULL) ||
3342	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3343		/* event not enabled */
3344		return;
3345	}
3346	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3347	if (m_notify == NULL)
3348		/* no space left */
3349		return;
3350	SCTP_BUF_LEN(m_notify) = 0;
3351	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3352	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3353	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3354	strasoc->assocreset_flags = flag;
3355	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3356	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3357	strasoc->assocreset_local_tsn = sending_tsn;
3358	strasoc->assocreset_remote_tsn = recv_tsn;
3359	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3360	SCTP_BUF_NEXT(m_notify) = NULL;
3361	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3362		/* no space */
3363		sctp_m_freem(m_notify);
3364		return;
3365	}
3366	/* append to socket */
3367	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3368	    0, 0, stcb->asoc.context, 0, 0, 0,
3369	    m_notify);
3370	if (control == NULL) {
3371		/* no memory */
3372		sctp_m_freem(m_notify);
3373		return;
3374	}
3375	control->spec_flags = M_NOTIFICATION;
3376	control->length = SCTP_BUF_LEN(m_notify);
3377	/* not that we need this */
3378	control->tail_mbuf = m_notify;
3379	sctp_add_to_readq(stcb->sctp_ep, stcb,
3380	    control,
3381	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3382}
3383
3384
3385
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification for 'number_entries'
 * stream ids from 'list' (network byte order), tagged with 'flag'
 * (direction/denied/failed bits). Dropped if the receive buffer is full.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	/* Allocate a cluster; the stream list length is variable. */
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	memset(strreset, 0, len);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* stream ids arrive in network byte order; convert for the user */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3448
3449
/*
 * Queue an SCTP_REMOTE_ERROR notification for an Operation Error chunk
 * received from the peer. When possible the raw error chunk is appended
 * in sre_data; under memory pressure the event is retried without it.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		/* fall back to the base event without the chunk payload */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* only true when the larger (chunk-carrying) allocation succeeded */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		sctp_m_freem(m_notify);
	}
}
3506
3507
3508void
3509sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3510    uint32_t error, void *data, int so_locked
3511#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3512    SCTP_UNUSED
3513#endif
3514)
3515{
3516	if ((stcb == NULL) ||
3517	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3518	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3519	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3520		/* If the socket is gone we are out of here */
3521		return;
3522	}
3523	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3524		return;
3525	}
3526	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3527	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3528		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3529		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3530		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3531			/* Don't report these in front states */
3532			return;
3533		}
3534	}
3535	switch (notification) {
3536	case SCTP_NOTIFY_ASSOC_UP:
3537		if (stcb->asoc.assoc_up_sent == 0) {
3538			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3539			stcb->asoc.assoc_up_sent = 1;
3540		}
3541		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3542			sctp_notify_adaptation_layer(stcb);
3543		}
3544		if (stcb->asoc.peer_supports_auth == 0) {
3545			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3546			    NULL, so_locked);
3547		}
3548		break;
3549	case SCTP_NOTIFY_ASSOC_DOWN:
3550		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3551		break;
3552	case SCTP_NOTIFY_INTERFACE_DOWN:
3553		{
3554			struct sctp_nets *net;
3555
3556			net = (struct sctp_nets *)data;
3557			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3558			    (struct sockaddr *)&net->ro._l_addr, error);
3559			break;
3560		}
3561	case SCTP_NOTIFY_INTERFACE_UP:
3562		{
3563			struct sctp_nets *net;
3564
3565			net = (struct sctp_nets *)data;
3566			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3567			    (struct sockaddr *)&net->ro._l_addr, error);
3568			break;
3569		}
3570	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3571		{
3572			struct sctp_nets *net;
3573
3574			net = (struct sctp_nets *)data;
3575			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3576			    (struct sockaddr *)&net->ro._l_addr, error);
3577			break;
3578		}
3579	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3580		sctp_notify_send_failed2(stcb, error,
3581		    (struct sctp_stream_queue_pending *)data, so_locked);
3582		break;
3583	case SCTP_NOTIFY_SENT_DG_FAIL:
3584		sctp_notify_send_failed(stcb, 1, error,
3585		    (struct sctp_tmit_chunk *)data, so_locked);
3586		break;
3587	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3588		sctp_notify_send_failed(stcb, 0, error,
3589		    (struct sctp_tmit_chunk *)data, so_locked);
3590		break;
3591	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3592		{
3593			uint32_t val;
3594
3595			val = *((uint32_t *) data);
3596
3597			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3598			break;
3599		}
3600	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3601		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3602		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3603			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3604		} else {
3605			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3606		}
3607		break;
3608	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3609		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3610		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3611			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3612		} else {
3613			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3614		}
3615		break;
3616	case SCTP_NOTIFY_ASSOC_RESTART:
3617		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3618		if (stcb->asoc.peer_supports_auth == 0) {
3619			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3620			    NULL, so_locked);
3621		}
3622		break;
3623	case SCTP_NOTIFY_STR_RESET_SEND:
3624		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3625		break;
3626	case SCTP_NOTIFY_STR_RESET_RECV:
3627		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3628		break;
3629	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3630		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3631		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3632		break;
3633	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3634		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3635		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3636		break;
3637	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3638		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3639		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3640		break;
3641	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3642		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3643		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3644		break;
3645	case SCTP_NOTIFY_ASCONF_ADD_IP:
3646		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3647		    error);
3648		break;
3649	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3650		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3651		    error);
3652		break;
3653	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3654		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3655		    error);
3656		break;
3657	case SCTP_NOTIFY_PEER_SHUTDOWN:
3658		sctp_notify_shutdown_event(stcb);
3659		break;
3660	case SCTP_NOTIFY_AUTH_NEW_KEY:
3661		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3662		    (uint16_t) (uintptr_t) data,
3663		    so_locked);
3664		break;
3665	case SCTP_NOTIFY_AUTH_FREE_KEY:
3666		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3667		    (uint16_t) (uintptr_t) data,
3668		    so_locked);
3669		break;
3670	case SCTP_NOTIFY_NO_PEER_AUTH:
3671		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3672		    (uint16_t) (uintptr_t) data,
3673		    so_locked);
3674		break;
3675	case SCTP_NOTIFY_SENDER_DRY:
3676		sctp_notify_sender_dry_event(stcb, so_locked);
3677		break;
3678	case SCTP_NOTIFY_REMOTE_ERROR:
3679		sctp_notify_remote_error(stcb, error, data);
3680		break;
3681	default:
3682		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3683		    __FUNCTION__, notification, notification);
3684		break;
3685	}			/* end switch */
3686}
3687
/*
 * Fail every chunk of queued outbound data (sent queue, send queue, and
 * per-stream output queues), notifying the ULP for each chunk with
 * 'error', and free the chunks. Used when the association is going away.
 * 'holds_lock' != 0 means the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already removed from the stream count */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the ULP this (already sent) chunk is lost */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have consumed chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the ULP this (never sent) chunk is lost */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* the notify may have stolen sp->data; re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3799
3800void
3801sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3802    struct sctp_abort_chunk *abort, int so_locked
3803#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3804    SCTP_UNUSED
3805#endif
3806)
3807{
3808	if (stcb == NULL) {
3809		return;
3810	}
3811	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3812	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3813	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3814		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3815	}
3816	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3817	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3818	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3819		return;
3820	}
3821	/* Tell them we lost the asoc */
3822	sctp_report_all_outbound(stcb, error, 1, so_locked);
3823	if (from_peer) {
3824		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3825	} else {
3826		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3827	}
3828}
3829
/*
 * Abort an association in response to an incoming packet: notify the ULP
 * (if a TCB exists), send an ABORT to the peer, and free the association.
 * When stcb is NULL only the ABORT is sent (e.g. OOTB handling).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take the socket lock; hold a refcount across the TCB
		 * unlock/relock so the association cannot be freed meanwhile.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* established associations also decrement the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3878
3879#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN logs,
 * handling wrap-around of the circular log arrays.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for "NOISY_PRINTS"; as
 * written, the entire body compiles out unless the misspelled macro is
 * defined. Confirm intent before renaming, since the enabling define
 * lives outside this file.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped: print the older tail [tsn_in_at, SIZE) first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3940
3941#endif
3942
/*
 * Locally abort an association: notify the ULP, send an ABORT (with the
 * optional operation-error cause 'op_err') to the peer, update stats and
 * free the association. Handles the stcb == NULL case by freeing the inp
 * if it is already marked gone and has no associations left.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations also decrement the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock; hold a refcount across the TCB
	 * unlock/relock so the association cannot be freed meanwhile.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4003
4004void
4005sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4006    struct sockaddr *src, struct sockaddr *dst,
4007    struct sctphdr *sh, struct sctp_inpcb *inp,
4008    struct mbuf *cause,
4009    uint8_t use_mflowid, uint32_t mflowid,
4010    uint32_t vrf_id, uint16_t port)
4011{
4012	struct sctp_chunkhdr *ch, chunk_buf;
4013	unsigned int chk_length;
4014	int contains_init_chunk;
4015
4016	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4017	/* Generate a TO address for future reference */
4018	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4019		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4020			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4021			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4022		}
4023	}
4024	contains_init_chunk = 0;
4025	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4026	    sizeof(*ch), (uint8_t *) & chunk_buf);
4027	while (ch != NULL) {
4028		chk_length = ntohs(ch->chunk_length);
4029		if (chk_length < sizeof(*ch)) {
4030			/* break to abort land */
4031			break;
4032		}
4033		switch (ch->chunk_type) {
4034		case SCTP_INIT:
4035			contains_init_chunk = 1;
4036			break;
4037		case SCTP_PACKET_DROPPED:
4038			/* we don't respond to pkt-dropped */
4039			return;
4040		case SCTP_ABORT_ASSOCIATION:
4041			/* we don't respond with an ABORT to an ABORT */
4042			return;
4043		case SCTP_SHUTDOWN_COMPLETE:
4044			/*
4045			 * we ignore it since we are not waiting for it and
4046			 * peer is gone
4047			 */
4048			return;
4049		case SCTP_SHUTDOWN_ACK:
4050			sctp_send_shutdown_complete2(src, dst, sh,
4051			    use_mflowid, mflowid,
4052			    vrf_id, port);
4053			return;
4054		default:
4055			break;
4056		}
4057		offset += SCTP_SIZE32(chk_length);
4058		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4059		    sizeof(*ch), (uint8_t *) & chunk_buf);
4060	}
4061	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4062	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4063	    (contains_init_chunk == 0))) {
4064		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4065		    use_mflowid, mflowid,
4066		    vrf_id, port);
4067	}
4068}
4069
4070/*
4071 * check the inbound datagram to make sure there is not an abort inside it,
4072 * if there is return 1, else return 0.
4073 */
4074int
4075sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4076{
4077	struct sctp_chunkhdr *ch;
4078	struct sctp_init_chunk *init_chk, chunk_buf;
4079	int offset;
4080	unsigned int chk_length;
4081
4082	offset = iphlen + sizeof(struct sctphdr);
4083	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4084	    (uint8_t *) & chunk_buf);
4085	while (ch != NULL) {
4086		chk_length = ntohs(ch->chunk_length);
4087		if (chk_length < sizeof(*ch)) {
4088			/* packet is probably corrupt */
4089			break;
4090		}
4091		/* we seem to be ok, is it an abort? */
4092		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4093			/* yep, tell them */
4094			return (1);
4095		}
4096		if (ch->chunk_type == SCTP_INITIATION) {
4097			/* need to update the Vtag */
4098			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4099			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4100			if (init_chk != NULL) {
4101				*vtagfill = ntohl(init_chk->init.initiate_tag);
4102			}
4103		}
4104		/* Nope, move to the next chunk */
4105		offset += SCTP_SIZE32(chk_length);
4106		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4107		    sizeof(*ch), (uint8_t *) & chunk_buf);
4108	}
4109	return (0);
4110}
4111
4112/*
4113 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4114 * set (i.e. it's 0) so, create this function to compare link local scopes
4115 */
4116#ifdef INET6
4117uint32_t
4118sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4119{
4120	struct sockaddr_in6 a, b;
4121
4122	/* save copies */
4123	a = *addr1;
4124	b = *addr2;
4125
4126	if (a.sin6_scope_id == 0)
4127		if (sa6_recoverscope(&a)) {
4128			/* can't get scope, so can't match */
4129			return (0);
4130		}
4131	if (b.sin6_scope_id == 0)
4132		if (sa6_recoverscope(&b)) {
4133			/* can't get scope, so can't match */
4134			return (0);
4135		}
4136	if (a.sin6_scope_id != b.sin6_scope_id)
4137		return (0);
4138
4139	return (1);
4140}
4141
4142/*
4143 * returns a sockaddr_in6 with embedded scope recovered and removed
4144 */
4145struct sockaddr_in6 *
4146sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4147{
4148	/* check and strip embedded scope junk */
4149	if (addr->sin6_family == AF_INET6) {
4150		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4151			if (addr->sin6_scope_id == 0) {
4152				*store = *addr;
4153				if (!sa6_recoverscope(store)) {
4154					/* use the recovered scope */
4155					addr = store;
4156				}
4157			} else {
4158				/* else, return the original "to" addr */
4159				in6_clearscope(&addr->sin6_addr);
4160			}
4161		}
4162	}
4163	return (addr);
4164}
4165
4166#endif
4167
4168/*
4169 * are the two addresses the same?  currently a "scopeless" check returns: 1
4170 * if same, 0 if not
4171 */
/*
 * Scopeless address equality check: returns 1 when sa1 and sa2 carry the
 * same address (same family, same address bytes), 0 otherwise.  NULL
 * pointers and unsupported families compare unequal.
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* Both must be valid and of the same family. */
	if ((sa1 == NULL) || (sa2 == NULL) ||
	    (sa1->sa_family != sa2->sa_family)) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6 addresses */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
#ifdef INET
	case AF_INET:
		/* IPv4 addresses */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
4213
4214void
4215sctp_print_address(struct sockaddr *sa)
4216{
4217#ifdef INET6
4218	char ip6buf[INET6_ADDRSTRLEN];
4219
4220#endif
4221
4222	switch (sa->sa_family) {
4223#ifdef INET6
4224	case AF_INET6:
4225		{
4226			struct sockaddr_in6 *sin6;
4227
4228			sin6 = (struct sockaddr_in6 *)sa;
4229			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4230			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4231			    ntohs(sin6->sin6_port),
4232			    sin6->sin6_scope_id);
4233			break;
4234		}
4235#endif
4236#ifdef INET
4237	case AF_INET:
4238		{
4239			struct sockaddr_in *sin;
4240			unsigned char *p;
4241
4242			sin = (struct sockaddr_in *)sa;
4243			p = (unsigned char *)&sin->sin_addr;
4244			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4245			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4246			break;
4247		}
4248#endif
4249	default:
4250		SCTP_PRINTF("?\n");
4251		break;
4252	}
4253}
4254
/*
 * Migrate all read-queue entries belonging to 'stcb' from old_inp to
 * new_inp (used on peeloff/accept).  The mbuf space is uncharged from the
 * old socket's receive buffer and re-charged to the new one.  'waitflags'
 * is handed to sblock(); on sblock failure the data is left where it is.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			/* Uncharge every mbuf from the old receive buffer. */
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		/* Charge every mbuf to the new receive buffer. */
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4330
/*
 * Append a complete control (receive) structure to the endpoint's read
 * queue and charge its mbuf chain to socket buffer 'sb' so select()/poll()
 * wake up.  Zero-length mbufs are trimmed from the chain first.  'end'
 * marks the message as complete; 'inp_read_lock_held' says whether the
 * caller already owns the INP read lock; 'so_locked' matters only on
 * platforms that need the socket lock for the wakeup.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side is gone: free the data instead of queueing it. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* Only user data counts toward the recv statistics. */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* Charge the mbuf against the socket buffer accounting. */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Hold a refcnt across the lock juggle so
				 * the stcb cannot disappear while the TCB
				 * lock is dropped (socket-before-TCB order).
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4456
4457
/*
 * Append mbuf chain 'm' to an existing read-queue entry 'control' — used
 * while a partial delivery (PD-API) is in progress or when growing a
 * reassembled message.  Zero-length mbufs are trimmed, the chain is
 * charged to 'sb' when non-NULL, and 'ctls_cumack' records the highest
 * TSN carried so far.  'end' marks the message complete.  Returns 0 on
 * success, -1 when there is nothing valid to append to.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/*
		 * Reader is gone; report success without queueing.
		 * NOTE(review): 'm' is not freed on this path — presumably
		 * the caller retains ownership; verify against callers.
		 */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* Charge the mbuf to the socket buffer accounting. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Hold a refcnt while juggling locks so the stcb
			 * cannot go away (socket-before-TCB lock order).
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4607
4608
4609
4610/*************HOLD THIS COMMENT FOR PATCH FILE OF
4611 *************ALTERNATE ROUTING CODE
4612 */
4613
4614/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4615 *************ALTERNATE ROUTING CODE
4616 */
4617
4618struct mbuf *
4619sctp_generate_cause(uint16_t code, char *info)
4620{
4621	struct mbuf *m;
4622	struct sctp_gen_error_cause *cause;
4623	size_t info_len, len;
4624
4625	if ((code == 0) || (info == NULL)) {
4626		return (NULL);
4627	}
4628	info_len = strlen(info);
4629	len = sizeof(struct sctp_paramhdr) + info_len;
4630	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4631	if (m != NULL) {
4632		SCTP_BUF_LEN(m) = len;
4633		cause = mtod(m, struct sctp_gen_error_cause *);
4634		cause->code = htons(code);
4635		cause->length = htons((uint16_t) len);
4636		memcpy(cause->info, info, info_len);
4637	}
4638	return (m);
4639}
4640
4641struct mbuf *
4642sctp_generate_no_user_data_cause(uint32_t tsn)
4643{
4644	struct mbuf *m;
4645	struct sctp_error_no_user_data *no_user_data_cause;
4646	size_t len;
4647
4648	len = sizeof(struct sctp_error_no_user_data);
4649	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4650	if (m != NULL) {
4651		SCTP_BUF_LEN(m) = len;
4652		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4653		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4654		no_user_data_cause->cause.length = htons((uint16_t) len);
4655		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4656	}
4657	return (m);
4658}
4659
4660#ifdef SCTP_MBCNT_LOGGING
/*
 * Return the buffer space held by chunk tp1 to the association's output
 * accounting: drop 'chk_cnt' chunks from the out-queue count and shrink
 * total_output_queue_size by the chunk's book_size (clamping at zero).
 * For TCP-model sockets the send socket buffer is credited as well.
 * Only compiled when SCTP_MBCNT_LOGGING is enabled; logs the decrease.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* Nothing booked for this chunk. */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero rather than letting the unsigned count wrap. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also carry the data in so_snd; credit it back. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4692
4693#endif
4694
4695int
4696sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4697    uint8_t sent, int so_locked
4698#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4699    SCTP_UNUSED
4700#endif
4701)
4702{
4703	struct sctp_stream_out *strq;
4704	struct sctp_tmit_chunk *chk = NULL, *tp2;
4705	struct sctp_stream_queue_pending *sp;
4706	uint16_t stream = 0, seq = 0;
4707	uint8_t foundeom = 0;
4708	int ret_sz = 0;
4709	int notdone;
4710	int do_wakeup_routine = 0;
4711
4712	stream = tp1->rec.data.stream_number;
4713	seq = tp1->rec.data.stream_seq;
4714	do {
4715		ret_sz += tp1->book_size;
4716		if (tp1->data != NULL) {
4717			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4718				sctp_flight_size_decrease(tp1);
4719				sctp_total_flight_decrease(stcb, tp1);
4720			}
4721			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4722			stcb->asoc.peers_rwnd += tp1->send_size;
4723			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4724			if (sent) {
4725				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4726			} else {
4727				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4728			}
4729			if (tp1->data) {
4730				sctp_m_freem(tp1->data);
4731				tp1->data = NULL;
4732			}
4733			do_wakeup_routine = 1;
4734			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4735				stcb->asoc.sent_queue_cnt_removeable--;
4736			}
4737		}
4738		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4739		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4740		    SCTP_DATA_NOT_FRAG) {
4741			/* not frag'ed we ae done   */
4742			notdone = 0;
4743			foundeom = 1;
4744		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4745			/* end of frag, we are done */
4746			notdone = 0;
4747			foundeom = 1;
4748		} else {
4749			/*
4750			 * Its a begin or middle piece, we must mark all of
4751			 * it
4752			 */
4753			notdone = 1;
4754			tp1 = TAILQ_NEXT(tp1, sctp_next);
4755		}
4756	} while (tp1 && notdone);
4757	if (foundeom == 0) {
4758		/*
4759		 * The multi-part message was scattered across the send and
4760		 * sent queue.
4761		 */
4762		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4763			if ((tp1->rec.data.stream_number != stream) ||
4764			    (tp1->rec.data.stream_seq != seq)) {
4765				break;
4766			}
4767			/*
4768			 * save to chk in case we have some on stream out
4769			 * queue. If so and we have an un-transmitted one we
4770			 * don't have to fudge the TSN.
4771			 */
4772			chk = tp1;
4773			ret_sz += tp1->book_size;
4774			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4775			if (sent) {
4776				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4777			} else {
4778				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4779			}
4780			if (tp1->data) {
4781				sctp_m_freem(tp1->data);
4782				tp1->data = NULL;
4783			}
4784			/* No flight involved here book the size to 0 */
4785			tp1->book_size = 0;
4786			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4787				foundeom = 1;
4788			}
4789			do_wakeup_routine = 1;
4790			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4791			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4792			/*
4793			 * on to the sent queue so we can wait for it to be
4794			 * passed by.
4795			 */
4796			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4797			    sctp_next);
4798			stcb->asoc.send_queue_cnt--;
4799			stcb->asoc.sent_queue_cnt++;
4800		}
4801	}
4802	if (foundeom == 0) {
4803		/*
4804		 * Still no eom found. That means there is stuff left on the
4805		 * stream out queue.. yuck.
4806		 */
4807		SCTP_TCB_SEND_LOCK(stcb);
4808		strq = &stcb->asoc.strmout[stream];
4809		sp = TAILQ_FIRST(&strq->outqueue);
4810		if (sp != NULL) {
4811			sp->discard_rest = 1;
4812			/*
4813			 * We may need to put a chunk on the queue that
4814			 * holds the TSN that would have been sent with the
4815			 * LAST bit.
4816			 */
4817			if (chk == NULL) {
4818				/* Yep, we have to */
4819				sctp_alloc_a_chunk(stcb, chk);
4820				if (chk == NULL) {
4821					/*
4822					 * we are hosed. All we can do is
4823					 * nothing.. which will cause an
4824					 * abort if the peer is paying
4825					 * attention.
4826					 */
4827					goto oh_well;
4828				}
4829				memset(chk, 0, sizeof(*chk));
4830				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4831				chk->sent = SCTP_FORWARD_TSN_SKIP;
4832				chk->asoc = &stcb->asoc;
4833				chk->rec.data.stream_seq = strq->next_sequence_send;
4834				chk->rec.data.stream_number = sp->stream;
4835				chk->rec.data.payloadtype = sp->ppid;
4836				chk->rec.data.context = sp->context;
4837				chk->flags = sp->act_flags;
4838				if (sp->net)
4839					chk->whoTo = sp->net;
4840				else
4841					chk->whoTo = stcb->asoc.primary_destination;
4842				atomic_add_int(&chk->whoTo->ref_count, 1);
4843				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4844				stcb->asoc.pr_sctp_cnt++;
4845				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4846				stcb->asoc.sent_queue_cnt++;
4847				stcb->asoc.pr_sctp_cnt++;
4848			} else {
4849				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4850			}
4851			strq->next_sequence_send++;
4852	oh_well:
4853			if (sp->data) {
4854				/*
4855				 * Pull any data to free up the SB and allow
4856				 * sender to "add more" while we will throw
4857				 * away :-)
4858				 */
4859				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4860				ret_sz += sp->length;
4861				do_wakeup_routine = 1;
4862				sp->some_taken = 1;
4863				sctp_m_freem(sp->data);
4864				sp->data = NULL;
4865				sp->tail_mbuf = NULL;
4866				sp->length = 0;
4867			}
4868		}
4869		SCTP_TCB_SEND_UNLOCK(stcb);
4870	}
4871	if (do_wakeup_routine) {
4872#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4873		struct socket *so;
4874
4875		so = SCTP_INP_SO(stcb->sctp_ep);
4876		if (!so_locked) {
4877			atomic_add_int(&stcb->asoc.refcnt, 1);
4878			SCTP_TCB_UNLOCK(stcb);
4879			SCTP_SOCKET_LOCK(so, 1);
4880			SCTP_TCB_LOCK(stcb);
4881			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4882			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4883				/* assoc was freed while we were unlocked */
4884				SCTP_SOCKET_UNLOCK(so, 1);
4885				return (ret_sz);
4886			}
4887		}
4888#endif
4889		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4890#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4891		if (!so_locked) {
4892			SCTP_SOCKET_UNLOCK(so, 1);
4893		}
4894#endif
4895	}
4896	return (ret_sz);
4897}
4898
4899/*
4900 * checks to see if the given address, sa, is one that is currently known by
4901 * the kernel note: can't distinguish the same address on multiple interfaces
4902 * and doesn't handle multiple addresses with different zone/scope id's note:
4903 * ifa_ifwithaddr() compares the entire sockaddr struct
4904 */
4905struct sctp_ifa *
4906sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4907    int holds_lock)
4908{
4909	struct sctp_laddr *laddr;
4910
4911	if (holds_lock == 0) {
4912		SCTP_INP_RLOCK(inp);
4913	}
4914	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4915		if (laddr->ifa == NULL)
4916			continue;
4917		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4918			continue;
4919#ifdef INET
4920		if (addr->sa_family == AF_INET) {
4921			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4922			    laddr->ifa->address.sin.sin_addr.s_addr) {
4923				/* found him. */
4924				if (holds_lock == 0) {
4925					SCTP_INP_RUNLOCK(inp);
4926				}
4927				return (laddr->ifa);
4928				break;
4929			}
4930		}
4931#endif
4932#ifdef INET6
4933		if (addr->sa_family == AF_INET6) {
4934			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4935			    &laddr->ifa->address.sin6)) {
4936				/* found him. */
4937				if (holds_lock == 0) {
4938					SCTP_INP_RUNLOCK(inp);
4939				}
4940				return (laddr->ifa);
4941				break;
4942			}
4943		}
4944#endif
4945	}
4946	if (holds_lock == 0) {
4947		SCTP_INP_RUNLOCK(inp);
4948	}
4949	return (NULL);
4950}
4951
/*
 * Compute the hash value used to place an address in the VRF address
 * hash table.  IPv4 folds the 32-bit address onto itself; IPv6 sums the
 * four 32-bit words first.  Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	uint32_t hash;

	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin = (struct sockaddr_in *)addr;

			hash = sin->sin_addr.s_addr;
			/* Fold the upper half into the lower half. */
			return (hash ^ (hash >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

			hash = sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3];
			/* Fold the upper half into the lower half. */
			return (hash ^ (hash >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
4985
4986struct sctp_ifa *
4987sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4988{
4989	struct sctp_ifa *sctp_ifap;
4990	struct sctp_vrf *vrf;
4991	struct sctp_ifalist *hash_head;
4992	uint32_t hash_of_addr;
4993
4994	if (holds_lock == 0)
4995		SCTP_IPI_ADDR_RLOCK();
4996
4997	vrf = sctp_find_vrf(vrf_id);
4998	if (vrf == NULL) {
4999stage_right:
5000		if (holds_lock == 0)
5001			SCTP_IPI_ADDR_RUNLOCK();
5002		return (NULL);
5003	}
5004	hash_of_addr = sctp_get_ifa_hash_val(addr);
5005
5006	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5007	if (hash_head == NULL) {
5008		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5009		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5010		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5011		sctp_print_address(addr);
5012		SCTP_PRINTF("No such bucket for address\n");
5013		if (holds_lock == 0)
5014			SCTP_IPI_ADDR_RUNLOCK();
5015
5016		return (NULL);
5017	}
5018	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5019		if (sctp_ifap == NULL) {
5020#ifdef INVARIANTS
5021			panic("Huh LIST_FOREACH corrupt");
5022			goto stage_right;
5023#else
5024			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5025			goto stage_right;
5026#endif
5027		}
5028		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5029			continue;
5030#ifdef INET
5031		if (addr->sa_family == AF_INET) {
5032			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5033			    sctp_ifap->address.sin.sin_addr.s_addr) {
5034				/* found him. */
5035				if (holds_lock == 0)
5036					SCTP_IPI_ADDR_RUNLOCK();
5037				return (sctp_ifap);
5038				break;
5039			}
5040		}
5041#endif
5042#ifdef INET6
5043		if (addr->sa_family == AF_INET6) {
5044			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5045			    &sctp_ifap->address.sin6)) {
5046				/* found him. */
5047				if (holds_lock == 0)
5048					SCTP_IPI_ADDR_RUNLOCK();
5049				return (sctp_ifap);
5050				break;
5051			}
5052		}
5053#endif
5054	}
5055	if (holds_lock == 0)
5056		SCTP_IPI_ADDR_RUNLOCK();
5057	return (NULL);
5058}
5059
/*
 * Called after the application has pulled data off the socket receive
 * buffer: decide whether the peer should be told that our receive
 * window has reopened.  The bytes in *freed_so_far are folded into
 * stcb->freed_by_sorcv_sincelast and *freed_so_far is reset to 0.  If
 * the window has grown by at least rwnd_req bytes since the last
 * report, a window-update SACK is sent immediately; otherwise the
 * accumulated amount is just remembered for next time.
 *
 * hold_rlock tells us whether the caller holds the inp read-queue
 * lock; if so, it is dropped while we take the TCB lock and send, and
 * re-acquired before returning (see the "out:" label).
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint before touching its socket pointer. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	/* dif = how much the window has grown since the last report. */
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Grew enough to be worth telling the peer about. */
		if (hold_rlock) {
			/*
			 * Drop the read-queue lock; we must not hold it
			 * across the TCB lock / send below.
			 */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/* Send the window-update SACK and push out queued chunks. */
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read-queue lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5141
5142int
5143sctp_sorecvmsg(struct socket *so,
5144    struct uio *uio,
5145    struct mbuf **mp,
5146    struct sockaddr *from,
5147    int fromlen,
5148    int *msg_flags,
5149    struct sctp_sndrcvinfo *sinfo,
5150    int filling_sinfo)
5151{
5152	/*
5153	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5154	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5155	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5156	 * On the way out we may send out any combination of:
5157	 * MSG_NOTIFICATION MSG_EOR
5158	 *
5159	 */
5160	struct sctp_inpcb *inp = NULL;
5161	int my_len = 0;
5162	int cp_len = 0, error = 0;
5163	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5164	struct mbuf *m = NULL;
5165	struct sctp_tcb *stcb = NULL;
5166	int wakeup_read_socket = 0;
5167	int freecnt_applied = 0;
5168	int out_flags = 0, in_flags = 0;
5169	int block_allowed = 1;
5170	uint32_t freed_so_far = 0;
5171	uint32_t copied_so_far = 0;
5172	int in_eeor_mode = 0;
5173	int no_rcv_needed = 0;
5174	uint32_t rwnd_req = 0;
5175	int hold_sblock = 0;
5176	int hold_rlock = 0;
5177	int slen = 0;
5178	uint32_t held_length = 0;
5179	int sockbuf_lock = 0;
5180
5181	if (uio == NULL) {
5182		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5183		return (EINVAL);
5184	}
5185	if (msg_flags) {
5186		in_flags = *msg_flags;
5187		if (in_flags & MSG_PEEK)
5188			SCTP_STAT_INCR(sctps_read_peeks);
5189	} else {
5190		in_flags = 0;
5191	}
5192	slen = uio->uio_resid;
5193
5194	/* Pull in and set up our int flags */
5195	if (in_flags & MSG_OOB) {
5196		/* Out of band's NOT supported */
5197		return (EOPNOTSUPP);
5198	}
5199	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5200		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5201		return (EINVAL);
5202	}
5203	if ((in_flags & (MSG_DONTWAIT
5204	    | MSG_NBIO
5205	    )) ||
5206	    SCTP_SO_IS_NBIO(so)) {
5207		block_allowed = 0;
5208	}
5209	/* setup the endpoint */
5210	inp = (struct sctp_inpcb *)so->so_pcb;
5211	if (inp == NULL) {
5212		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5213		return (EFAULT);
5214	}
5215	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5216	/* Must be at least a MTU's worth */
5217	if (rwnd_req < SCTP_MIN_RWND)
5218		rwnd_req = SCTP_MIN_RWND;
5219	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5220	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5221		sctp_misc_ints(SCTP_SORECV_ENTER,
5222		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5223	}
5224	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5225		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5226		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5227	}
5228	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5229	if (error) {
5230		goto release_unlocked;
5231	}
5232	sockbuf_lock = 1;
5233restart:
5234
5235
5236restart_nosblocks:
5237	if (hold_sblock == 0) {
5238		SOCKBUF_LOCK(&so->so_rcv);
5239		hold_sblock = 1;
5240	}
5241	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5242	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5243		goto out;
5244	}
5245	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5246		if (so->so_error) {
5247			error = so->so_error;
5248			if ((in_flags & MSG_PEEK) == 0)
5249				so->so_error = 0;
5250			goto out;
5251		} else {
5252			if (so->so_rcv.sb_cc == 0) {
5253				/* indicate EOF */
5254				error = 0;
5255				goto out;
5256			}
5257		}
5258	}
5259	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5260		/* we need to wait for data */
5261		if ((so->so_rcv.sb_cc == 0) &&
5262		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5263		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5264			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5265				/*
5266				 * For active open side clear flags for
5267				 * re-use passive open is blocked by
5268				 * connect.
5269				 */
5270				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5271					/*
5272					 * You were aborted, passive side
5273					 * always hits here
5274					 */
5275					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5276					error = ECONNRESET;
5277				}
5278				so->so_state &= ~(SS_ISCONNECTING |
5279				    SS_ISDISCONNECTING |
5280				    SS_ISCONFIRMING |
5281				    SS_ISCONNECTED);
5282				if (error == 0) {
5283					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5284						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5285						error = ENOTCONN;
5286					}
5287				}
5288				goto out;
5289			}
5290		}
5291		error = sbwait(&so->so_rcv);
5292		if (error) {
5293			goto out;
5294		}
5295		held_length = 0;
5296		goto restart_nosblocks;
5297	} else if (so->so_rcv.sb_cc == 0) {
5298		if (so->so_error) {
5299			error = so->so_error;
5300			if ((in_flags & MSG_PEEK) == 0)
5301				so->so_error = 0;
5302		} else {
5303			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5304			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5305				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5306					/*
5307					 * For active open side clear flags
5308					 * for re-use passive open is
5309					 * blocked by connect.
5310					 */
5311					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5312						/*
5313						 * You were aborted, passive
5314						 * side always hits here
5315						 */
5316						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5317						error = ECONNRESET;
5318					}
5319					so->so_state &= ~(SS_ISCONNECTING |
5320					    SS_ISDISCONNECTING |
5321					    SS_ISCONFIRMING |
5322					    SS_ISCONNECTED);
5323					if (error == 0) {
5324						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5325							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5326							error = ENOTCONN;
5327						}
5328					}
5329					goto out;
5330				}
5331			}
5332			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5333			error = EWOULDBLOCK;
5334		}
5335		goto out;
5336	}
5337	if (hold_sblock == 1) {
5338		SOCKBUF_UNLOCK(&so->so_rcv);
5339		hold_sblock = 0;
5340	}
5341	/* we possibly have data we can read */
5342	/* sa_ignore FREED_MEMORY */
5343	control = TAILQ_FIRST(&inp->read_queue);
5344	if (control == NULL) {
5345		/*
5346		 * This could be happening since the appender did the
5347		 * increment but as not yet did the tailq insert onto the
5348		 * read_queue
5349		 */
5350		if (hold_rlock == 0) {
5351			SCTP_INP_READ_LOCK(inp);
5352		}
5353		control = TAILQ_FIRST(&inp->read_queue);
5354		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5355#ifdef INVARIANTS
5356			panic("Huh, its non zero and nothing on control?");
5357#endif
5358			so->so_rcv.sb_cc = 0;
5359		}
5360		SCTP_INP_READ_UNLOCK(inp);
5361		hold_rlock = 0;
5362		goto restart;
5363	}
5364	if ((control->length == 0) &&
5365	    (control->do_not_ref_stcb)) {
5366		/*
5367		 * Clean up code for freeing assoc that left behind a
5368		 * pdapi.. maybe a peer in EEOR that just closed after
5369		 * sending and never indicated a EOR.
5370		 */
5371		if (hold_rlock == 0) {
5372			hold_rlock = 1;
5373			SCTP_INP_READ_LOCK(inp);
5374		}
5375		control->held_length = 0;
5376		if (control->data) {
5377			/* Hmm there is data here .. fix */
5378			struct mbuf *m_tmp;
5379			int cnt = 0;
5380
5381			m_tmp = control->data;
5382			while (m_tmp) {
5383				cnt += SCTP_BUF_LEN(m_tmp);
5384				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5385					control->tail_mbuf = m_tmp;
5386					control->end_added = 1;
5387				}
5388				m_tmp = SCTP_BUF_NEXT(m_tmp);
5389			}
5390			control->length = cnt;
5391		} else {
5392			/* remove it */
5393			TAILQ_REMOVE(&inp->read_queue, control, next);
5394			/* Add back any hiddend data */
5395			sctp_free_remote_addr(control->whoFrom);
5396			sctp_free_a_readq(stcb, control);
5397		}
5398		if (hold_rlock) {
5399			hold_rlock = 0;
5400			SCTP_INP_READ_UNLOCK(inp);
5401		}
5402		goto restart;
5403	}
5404	if ((control->length == 0) &&
5405	    (control->end_added == 1)) {
5406		/*
5407		 * Do we also need to check for (control->pdapi_aborted ==
5408		 * 1)?
5409		 */
5410		if (hold_rlock == 0) {
5411			hold_rlock = 1;
5412			SCTP_INP_READ_LOCK(inp);
5413		}
5414		TAILQ_REMOVE(&inp->read_queue, control, next);
5415		if (control->data) {
5416#ifdef INVARIANTS
5417			panic("control->data not null but control->length == 0");
5418#else
5419			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5420			sctp_m_freem(control->data);
5421			control->data = NULL;
5422#endif
5423		}
5424		if (control->aux_data) {
5425			sctp_m_free(control->aux_data);
5426			control->aux_data = NULL;
5427		}
5428		sctp_free_remote_addr(control->whoFrom);
5429		sctp_free_a_readq(stcb, control);
5430		if (hold_rlock) {
5431			hold_rlock = 0;
5432			SCTP_INP_READ_UNLOCK(inp);
5433		}
5434		goto restart;
5435	}
5436	if (control->length == 0) {
5437		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5438		    (filling_sinfo)) {
5439			/* find a more suitable one then this */
5440			ctl = TAILQ_NEXT(control, next);
5441			while (ctl) {
5442				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5443				    (ctl->some_taken ||
5444				    (ctl->spec_flags & M_NOTIFICATION) ||
5445				    ((ctl->do_not_ref_stcb == 0) &&
5446				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5447				    ) {
5448					/*-
5449					 * If we have a different TCB next, and there is data
5450					 * present. If we have already taken some (pdapi), OR we can
5451					 * ref the tcb and no delivery as started on this stream, we
5452					 * take it. Note we allow a notification on a different
5453					 * assoc to be delivered..
5454					 */
5455					control = ctl;
5456					goto found_one;
5457				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5458					    (ctl->length) &&
5459					    ((ctl->some_taken) ||
5460					    ((ctl->do_not_ref_stcb == 0) &&
5461					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5462				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5463					/*-
5464					 * If we have the same tcb, and there is data present, and we
5465					 * have the strm interleave feature present. Then if we have
5466					 * taken some (pdapi) or we can refer to tht tcb AND we have
5467					 * not started a delivery for this stream, we can take it.
5468					 * Note we do NOT allow a notificaiton on the same assoc to
5469					 * be delivered.
5470					 */
5471					control = ctl;
5472					goto found_one;
5473				}
5474				ctl = TAILQ_NEXT(ctl, next);
5475			}
5476		}
5477		/*
5478		 * if we reach here, not suitable replacement is available
5479		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5480		 * into the our held count, and its time to sleep again.
5481		 */
5482		held_length = so->so_rcv.sb_cc;
5483		control->held_length = so->so_rcv.sb_cc;
5484		goto restart;
5485	}
5486	/* Clear the held length since there is something to read */
5487	control->held_length = 0;
5488	if (hold_rlock) {
5489		SCTP_INP_READ_UNLOCK(inp);
5490		hold_rlock = 0;
5491	}
5492found_one:
5493	/*
5494	 * If we reach here, control has a some data for us to read off.
5495	 * Note that stcb COULD be NULL.
5496	 */
5497	control->some_taken++;
5498	if (hold_sblock) {
5499		SOCKBUF_UNLOCK(&so->so_rcv);
5500		hold_sblock = 0;
5501	}
5502	stcb = control->stcb;
5503	if (stcb) {
5504		if ((control->do_not_ref_stcb == 0) &&
5505		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5506			if (freecnt_applied == 0)
5507				stcb = NULL;
5508		} else if (control->do_not_ref_stcb == 0) {
5509			/* you can't free it on me please */
5510			/*
5511			 * The lock on the socket buffer protects us so the
5512			 * free code will stop. But since we used the
5513			 * socketbuf lock and the sender uses the tcb_lock
5514			 * to increment, we need to use the atomic add to
5515			 * the refcnt
5516			 */
5517			if (freecnt_applied) {
5518#ifdef INVARIANTS
5519				panic("refcnt already incremented");
5520#else
5521				SCTP_PRINTF("refcnt already incremented?\n");
5522#endif
5523			} else {
5524				atomic_add_int(&stcb->asoc.refcnt, 1);
5525				freecnt_applied = 1;
5526			}
5527			/*
5528			 * Setup to remember how much we have not yet told
5529			 * the peer our rwnd has opened up. Note we grab the
5530			 * value from the tcb from last time. Note too that
5531			 * sack sending clears this when a sack is sent,
5532			 * which is fine. Once we hit the rwnd_req, we then
5533			 * will go to the sctp_user_rcvd() that will not
5534			 * lock until it KNOWs it MUST send a WUP-SACK.
5535			 */
5536			freed_so_far = stcb->freed_by_sorcv_sincelast;
5537			stcb->freed_by_sorcv_sincelast = 0;
5538		}
5539	}
5540	if (stcb &&
5541	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5542	    control->do_not_ref_stcb == 0) {
5543		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5544	}
5545	/* First lets get off the sinfo and sockaddr info */
5546	if ((sinfo) && filling_sinfo) {
5547		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5548		nxt = TAILQ_NEXT(control, next);
5549		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5550		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5551			struct sctp_extrcvinfo *s_extra;
5552
5553			s_extra = (struct sctp_extrcvinfo *)sinfo;
5554			if ((nxt) &&
5555			    (nxt->length)) {
5556				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5557				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5558					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5559				}
5560				if (nxt->spec_flags & M_NOTIFICATION) {
5561					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5562				}
5563				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5564				s_extra->sreinfo_next_length = nxt->length;
5565				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5566				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5567				if (nxt->tail_mbuf != NULL) {
5568					if (nxt->end_added) {
5569						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5570					}
5571				}
5572			} else {
5573				/*
5574				 * we explicitly 0 this, since the memcpy
5575				 * got some other things beyond the older
5576				 * sinfo_ that is on the control's structure
5577				 * :-D
5578				 */
5579				nxt = NULL;
5580				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5581				s_extra->sreinfo_next_aid = 0;
5582				s_extra->sreinfo_next_length = 0;
5583				s_extra->sreinfo_next_ppid = 0;
5584				s_extra->sreinfo_next_stream = 0;
5585			}
5586		}
5587		/*
5588		 * update off the real current cum-ack, if we have an stcb.
5589		 */
5590		if ((control->do_not_ref_stcb == 0) && stcb)
5591			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5592		/*
5593		 * mask off the high bits, we keep the actual chunk bits in
5594		 * there.
5595		 */
5596		sinfo->sinfo_flags &= 0x00ff;
5597		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5598			sinfo->sinfo_flags |= SCTP_UNORDERED;
5599		}
5600	}
5601#ifdef SCTP_ASOCLOG_OF_TSNS
5602	{
5603		int index, newindex;
5604		struct sctp_pcbtsn_rlog *entry;
5605
5606		do {
5607			index = inp->readlog_index;
5608			newindex = index + 1;
5609			if (newindex >= SCTP_READ_LOG_SIZE) {
5610				newindex = 0;
5611			}
5612		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5613		entry = &inp->readlog[index];
5614		entry->vtag = control->sinfo_assoc_id;
5615		entry->strm = control->sinfo_stream;
5616		entry->seq = control->sinfo_ssn;
5617		entry->sz = control->length;
5618		entry->flgs = control->sinfo_flags;
5619	}
5620#endif
5621	if (fromlen && from) {
5622		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5623		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5624#ifdef INET6
5625		case AF_INET6:
5626			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5627			break;
5628#endif
5629#ifdef INET
5630		case AF_INET:
5631			((struct sockaddr_in *)from)->sin_port = control->port_from;
5632			break;
5633#endif
5634		default:
5635			break;
5636		}
5637		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5638
5639#if defined(INET) && defined(INET6)
5640		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5641		    (from->sa_family == AF_INET) &&
5642		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5643			struct sockaddr_in *sin;
5644			struct sockaddr_in6 sin6;
5645
5646			sin = (struct sockaddr_in *)from;
5647			bzero(&sin6, sizeof(sin6));
5648			sin6.sin6_family = AF_INET6;
5649			sin6.sin6_len = sizeof(struct sockaddr_in6);
5650			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5651			bcopy(&sin->sin_addr,
5652			    &sin6.sin6_addr.s6_addr32[3],
5653			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5654			sin6.sin6_port = sin->sin_port;
5655			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5656		}
5657#endif
5658#ifdef INET6
5659		{
5660			struct sockaddr_in6 lsa6, *from6;
5661
5662			from6 = (struct sockaddr_in6 *)from;
5663			sctp_recover_scope_mac(from6, (&lsa6));
5664		}
5665#endif
5666	}
5667	/* now copy out what data we can */
5668	if (mp == NULL) {
5669		/* copy out each mbuf in the chain up to length */
5670get_more_data:
5671		m = control->data;
5672		while (m) {
5673			/* Move out all we can */
5674			cp_len = (int)uio->uio_resid;
5675			my_len = (int)SCTP_BUF_LEN(m);
5676			if (cp_len > my_len) {
5677				/* not enough in this buf */
5678				cp_len = my_len;
5679			}
5680			if (hold_rlock) {
5681				SCTP_INP_READ_UNLOCK(inp);
5682				hold_rlock = 0;
5683			}
5684			if (cp_len > 0)
5685				error = uiomove(mtod(m, char *), cp_len, uio);
5686			/* re-read */
5687			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5688				goto release;
5689			}
5690			if ((control->do_not_ref_stcb == 0) && stcb &&
5691			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5692				no_rcv_needed = 1;
5693			}
5694			if (error) {
5695				/* error we are out of here */
5696				goto release;
5697			}
5698			if ((SCTP_BUF_NEXT(m) == NULL) &&
5699			    (cp_len >= SCTP_BUF_LEN(m)) &&
5700			    ((control->end_added == 0) ||
5701			    (control->end_added &&
5702			    (TAILQ_NEXT(control, next) == NULL)))
5703			    ) {
5704				SCTP_INP_READ_LOCK(inp);
5705				hold_rlock = 1;
5706			}
5707			if (cp_len == SCTP_BUF_LEN(m)) {
5708				if ((SCTP_BUF_NEXT(m) == NULL) &&
5709				    (control->end_added)) {
5710					out_flags |= MSG_EOR;
5711					if ((control->do_not_ref_stcb == 0) &&
5712					    (control->stcb != NULL) &&
5713					    ((control->spec_flags & M_NOTIFICATION) == 0))
5714						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5715				}
5716				if (control->spec_flags & M_NOTIFICATION) {
5717					out_flags |= MSG_NOTIFICATION;
5718				}
5719				/* we ate up the mbuf */
5720				if (in_flags & MSG_PEEK) {
5721					/* just looking */
5722					m = SCTP_BUF_NEXT(m);
5723					copied_so_far += cp_len;
5724				} else {
5725					/* dispose of the mbuf */
5726					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5727						sctp_sblog(&so->so_rcv,
5728						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5729					}
5730					sctp_sbfree(control, stcb, &so->so_rcv, m);
5731					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5732						sctp_sblog(&so->so_rcv,
5733						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5734					}
5735					copied_so_far += cp_len;
5736					freed_so_far += cp_len;
5737					freed_so_far += MSIZE;
5738					atomic_subtract_int(&control->length, cp_len);
5739					control->data = sctp_m_free(m);
5740					m = control->data;
5741					/*
5742					 * been through it all, must hold sb
5743					 * lock ok to null tail
5744					 */
5745					if (control->data == NULL) {
5746#ifdef INVARIANTS
5747						if ((control->end_added == 0) ||
5748						    (TAILQ_NEXT(control, next) == NULL)) {
5749							/*
5750							 * If the end is not
5751							 * added, OR the
5752							 * next is NOT null
5753							 * we MUST have the
5754							 * lock.
5755							 */
5756							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5757								panic("Hmm we don't own the lock?");
5758							}
5759						}
5760#endif
5761						control->tail_mbuf = NULL;
5762#ifdef INVARIANTS
5763						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5764							panic("end_added, nothing left and no MSG_EOR");
5765						}
5766#endif
5767					}
5768				}
5769			} else {
5770				/* Do we need to trim the mbuf? */
5771				if (control->spec_flags & M_NOTIFICATION) {
5772					out_flags |= MSG_NOTIFICATION;
5773				}
5774				if ((in_flags & MSG_PEEK) == 0) {
5775					SCTP_BUF_RESV_UF(m, cp_len);
5776					SCTP_BUF_LEN(m) -= cp_len;
5777					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5778						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5779					}
5780					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5781					if ((control->do_not_ref_stcb == 0) &&
5782					    stcb) {
5783						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5784					}
5785					copied_so_far += cp_len;
5786					freed_so_far += cp_len;
5787					freed_so_far += MSIZE;
5788					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5789						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5790						    SCTP_LOG_SBRESULT, 0);
5791					}
5792					atomic_subtract_int(&control->length, cp_len);
5793				} else {
5794					copied_so_far += cp_len;
5795				}
5796			}
5797			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5798				break;
5799			}
5800			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5801			    (control->do_not_ref_stcb == 0) &&
5802			    (freed_so_far >= rwnd_req)) {
5803				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5804			}
5805		}		/* end while(m) */
5806		/*
5807		 * At this point we have looked at it all and we either have
5808		 * a MSG_EOR/or read all the user wants... <OR>
5809		 * control->length == 0.
5810		 */
5811		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5812			/* we are done with this control */
5813			if (control->length == 0) {
5814				if (control->data) {
5815#ifdef INVARIANTS
5816					panic("control->data not null at read eor?");
5817#else
5818					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5819					sctp_m_freem(control->data);
5820					control->data = NULL;
5821#endif
5822				}
5823		done_with_control:
5824				if (TAILQ_NEXT(control, next) == NULL) {
5825					/*
5826					 * If we don't have a next we need a
5827					 * lock, if there is a next
5828					 * interrupt is filling ahead of us
5829					 * and we don't need a lock to
5830					 * remove this guy (which is the
5831					 * head of the queue).
5832					 */
5833					if (hold_rlock == 0) {
5834						SCTP_INP_READ_LOCK(inp);
5835						hold_rlock = 1;
5836					}
5837				}
5838				TAILQ_REMOVE(&inp->read_queue, control, next);
5839				/* Add back any hiddend data */
5840				if (control->held_length) {
5841					held_length = 0;
5842					control->held_length = 0;
5843					wakeup_read_socket = 1;
5844				}
5845				if (control->aux_data) {
5846					sctp_m_free(control->aux_data);
5847					control->aux_data = NULL;
5848				}
5849				no_rcv_needed = control->do_not_ref_stcb;
5850				sctp_free_remote_addr(control->whoFrom);
5851				control->data = NULL;
5852				sctp_free_a_readq(stcb, control);
5853				control = NULL;
5854				if ((freed_so_far >= rwnd_req) &&
5855				    (no_rcv_needed == 0))
5856					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5857
5858			} else {
5859				/*
5860				 * The user did not read all of this
5861				 * message, turn off the returned MSG_EOR
5862				 * since we are leaving more behind on the
5863				 * control to read.
5864				 */
5865#ifdef INVARIANTS
5866				if (control->end_added &&
5867				    (control->data == NULL) &&
5868				    (control->tail_mbuf == NULL)) {
5869					panic("Gak, control->length is corrupt?");
5870				}
5871#endif
5872				no_rcv_needed = control->do_not_ref_stcb;
5873				out_flags &= ~MSG_EOR;
5874			}
5875		}
5876		if (out_flags & MSG_EOR) {
5877			goto release;
5878		}
5879		if ((uio->uio_resid == 0) ||
5880		    ((in_eeor_mode) &&
5881		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5882			goto release;
5883		}
5884		/*
5885		 * If I hit here the receiver wants more and this message is
5886		 * NOT done (pd-api). So two questions. Can we block? if not
5887		 * we are done. Did the user NOT set MSG_WAITALL?
5888		 */
5889		if (block_allowed == 0) {
5890			goto release;
5891		}
5892		/*
5893		 * We need to wait for more data a few things: - We don't
5894		 * sbunlock() so we don't get someone else reading. - We
5895		 * must be sure to account for the case where what is added
5896		 * is NOT to our control when we wakeup.
5897		 */
5898
5899		/*
5900		 * Do we need to tell the transport a rwnd update might be
5901		 * needed before we go to sleep?
5902		 */
5903		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5904		    ((freed_so_far >= rwnd_req) &&
5905		    (control->do_not_ref_stcb == 0) &&
5906		    (no_rcv_needed == 0))) {
5907			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5908		}
5909wait_some_more:
5910		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5911			goto release;
5912		}
5913		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5914			goto release;
5915
5916		if (hold_rlock == 1) {
5917			SCTP_INP_READ_UNLOCK(inp);
5918			hold_rlock = 0;
5919		}
5920		if (hold_sblock == 0) {
5921			SOCKBUF_LOCK(&so->so_rcv);
5922			hold_sblock = 1;
5923		}
5924		if ((copied_so_far) && (control->length == 0) &&
5925		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5926			goto release;
5927		}
5928		if (so->so_rcv.sb_cc <= control->held_length) {
5929			error = sbwait(&so->so_rcv);
5930			if (error) {
5931				goto release;
5932			}
5933			control->held_length = 0;
5934		}
5935		if (hold_sblock) {
5936			SOCKBUF_UNLOCK(&so->so_rcv);
5937			hold_sblock = 0;
5938		}
5939		if (control->length == 0) {
5940			/* still nothing here */
5941			if (control->end_added == 1) {
5942				/* he aborted, or is done i.e.did a shutdown */
5943				out_flags |= MSG_EOR;
5944				if (control->pdapi_aborted) {
5945					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5946						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5947
5948					out_flags |= MSG_TRUNC;
5949				} else {
5950					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5951						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5952				}
5953				goto done_with_control;
5954			}
5955			if (so->so_rcv.sb_cc > held_length) {
5956				control->held_length = so->so_rcv.sb_cc;
5957				held_length = 0;
5958			}
5959			goto wait_some_more;
5960		} else if (control->data == NULL) {
5961			/*
5962			 * we must re-sync since data is probably being
5963			 * added
5964			 */
5965			SCTP_INP_READ_LOCK(inp);
5966			if ((control->length > 0) && (control->data == NULL)) {
5967				/*
5968				 * big trouble.. we have the lock and its
5969				 * corrupt?
5970				 */
5971#ifdef INVARIANTS
5972				panic("Impossible data==NULL length !=0");
5973#endif
5974				out_flags |= MSG_EOR;
5975				out_flags |= MSG_TRUNC;
5976				control->length = 0;
5977				SCTP_INP_READ_UNLOCK(inp);
5978				goto done_with_control;
5979			}
5980			SCTP_INP_READ_UNLOCK(inp);
5981			/* We will fall around to get more data */
5982		}
5983		goto get_more_data;
5984	} else {
5985		/*-
5986		 * Give caller back the mbuf chain,
5987		 * store in uio_resid the length
5988		 */
5989		wakeup_read_socket = 0;
5990		if ((control->end_added == 0) ||
5991		    (TAILQ_NEXT(control, next) == NULL)) {
5992			/* Need to get rlock */
5993			if (hold_rlock == 0) {
5994				SCTP_INP_READ_LOCK(inp);
5995				hold_rlock = 1;
5996			}
5997		}
5998		if (control->end_added) {
5999			out_flags |= MSG_EOR;
6000			if ((control->do_not_ref_stcb == 0) &&
6001			    (control->stcb != NULL) &&
6002			    ((control->spec_flags & M_NOTIFICATION) == 0))
6003				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6004		}
6005		if (control->spec_flags & M_NOTIFICATION) {
6006			out_flags |= MSG_NOTIFICATION;
6007		}
6008		uio->uio_resid = control->length;
6009		*mp = control->data;
6010		m = control->data;
6011		while (m) {
6012			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6013				sctp_sblog(&so->so_rcv,
6014				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6015			}
6016			sctp_sbfree(control, stcb, &so->so_rcv, m);
6017			freed_so_far += SCTP_BUF_LEN(m);
6018			freed_so_far += MSIZE;
6019			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6020				sctp_sblog(&so->so_rcv,
6021				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6022			}
6023			m = SCTP_BUF_NEXT(m);
6024		}
6025		control->data = control->tail_mbuf = NULL;
6026		control->length = 0;
6027		if (out_flags & MSG_EOR) {
6028			/* Done with this control */
6029			goto done_with_control;
6030		}
6031	}
6032release:
6033	if (hold_rlock == 1) {
6034		SCTP_INP_READ_UNLOCK(inp);
6035		hold_rlock = 0;
6036	}
6037	if (hold_sblock == 1) {
6038		SOCKBUF_UNLOCK(&so->so_rcv);
6039		hold_sblock = 0;
6040	}
6041	sbunlock(&so->so_rcv);
6042	sockbuf_lock = 0;
6043
6044release_unlocked:
6045	if (hold_sblock) {
6046		SOCKBUF_UNLOCK(&so->so_rcv);
6047		hold_sblock = 0;
6048	}
6049	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6050		if ((freed_so_far >= rwnd_req) &&
6051		    (control && (control->do_not_ref_stcb == 0)) &&
6052		    (no_rcv_needed == 0))
6053			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6054	}
6055out:
6056	if (msg_flags) {
6057		*msg_flags = out_flags;
6058	}
6059	if (((out_flags & MSG_EOR) == 0) &&
6060	    ((in_flags & MSG_PEEK) == 0) &&
6061	    (sinfo) &&
6062	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6063	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6064		struct sctp_extrcvinfo *s_extra;
6065
6066		s_extra = (struct sctp_extrcvinfo *)sinfo;
6067		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6068	}
6069	if (hold_rlock == 1) {
6070		SCTP_INP_READ_UNLOCK(inp);
6071	}
6072	if (hold_sblock) {
6073		SOCKBUF_UNLOCK(&so->so_rcv);
6074	}
6075	if (sockbuf_lock) {
6076		sbunlock(&so->so_rcv);
6077	}
6078	if (freecnt_applied) {
6079		/*
6080		 * The lock on the socket buffer protects us so the free
6081		 * code will stop. But since we used the socketbuf lock and
6082		 * the sender uses the tcb_lock to increment, we need to use
6083		 * the atomic add to the refcnt.
6084		 */
6085		if (stcb == NULL) {
6086#ifdef INVARIANTS
6087			panic("stcb for refcnt has gone NULL?");
6088			goto stage_left;
6089#else
6090			goto stage_left;
6091#endif
6092		}
6093		atomic_add_int(&stcb->asoc.refcnt, -1);
6094		/* Save the value back for next time */
6095		stcb->freed_by_sorcv_sincelast = freed_so_far;
6096	}
6097	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6098		if (stcb) {
6099			sctp_misc_ints(SCTP_SORECV_DONE,
6100			    freed_so_far,
6101			    ((uio) ? (slen - uio->uio_resid) : slen),
6102			    stcb->asoc.my_rwnd,
6103			    so->so_rcv.sb_cc);
6104		} else {
6105			sctp_misc_ints(SCTP_SORECV_DONE,
6106			    freed_so_far,
6107			    ((uio) ? (slen - uio->uio_resid) : slen),
6108			    0,
6109			    so->so_rcv.sb_cc);
6110		}
6111	}
6112stage_left:
6113	if (wakeup_read_socket) {
6114		sctp_sorwakeup(inp, so);
6115	}
6116	return (error);
6117}
6118
6119
6120#ifdef SCTP_MBUF_LOGGING
6121struct mbuf *
6122sctp_m_free(struct mbuf *m)
6123{
6124	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6125		if (SCTP_BUF_IS_EXTENDED(m)) {
6126			sctp_log_mb(m, SCTP_MBUF_IFREE);
6127		}
6128	}
6129	return (m_free(m));
6130}
6131
6132void
6133sctp_m_freem(struct mbuf *mb)
6134{
6135	while (mb != NULL)
6136		mb = sctp_m_free(mb);
6137}
6138
6139#endif
6140
/*
 * Queue a "peer set primary" request for the local address 'sa' in VRF
 * 'vrf_id'.  The request is handed to the address work queue, whose
 * iterator applies it to every association holding the address.
 * Returns 0 on success, EADDRNOTAVAIL if the address is not local, or
 * ENOMEM if no work-queue entry could be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must belong to a local interface in this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the entry is processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6187
6188
6189int
6190sctp_soreceive(struct socket *so,
6191    struct sockaddr **psa,
6192    struct uio *uio,
6193    struct mbuf **mp0,
6194    struct mbuf **controlp,
6195    int *flagsp)
6196{
6197	int error, fromlen;
6198	uint8_t sockbuf[256];
6199	struct sockaddr *from;
6200	struct sctp_extrcvinfo sinfo;
6201	int filling_sinfo = 1;
6202	struct sctp_inpcb *inp;
6203
6204	inp = (struct sctp_inpcb *)so->so_pcb;
6205	/* pickup the assoc we are reading from */
6206	if (inp == NULL) {
6207		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6208		return (EINVAL);
6209	}
6210	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6211	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6212	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6213	    (controlp == NULL)) {
6214		/* user does not want the sndrcv ctl */
6215		filling_sinfo = 0;
6216	}
6217	if (psa) {
6218		from = (struct sockaddr *)sockbuf;
6219		fromlen = sizeof(sockbuf);
6220		from->sa_len = 0;
6221	} else {
6222		from = NULL;
6223		fromlen = 0;
6224	}
6225
6226	if (filling_sinfo) {
6227		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6228	}
6229	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6230	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6231	if (controlp != NULL) {
6232		/* copy back the sinfo in a CMSG format */
6233		if (filling_sinfo)
6234			*controlp = sctp_build_ctl_nchunk(inp,
6235			    (struct sctp_sndrcvinfo *)&sinfo);
6236		else
6237			*controlp = NULL;
6238	}
6239	if (psa) {
6240		/* copy back the address info */
6241		if (from && from->sa_len) {
6242			*psa = sodupsockaddr(from, M_NOWAIT);
6243		} else {
6244			*psa = NULL;
6245		}
6246	}
6247	return (error);
6248}
6249
6250
6251
6252
6253
/*
 * Add up to 'totaddr' addresses from the packed sockaddr array 'addr'
 * to association 'stcb' (sctp_connectx() helper).  On an invalid
 * address or allocation failure the association is freed, *error is
 * set, and the number of addresses added so far is returned.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves 'incr'
			 * unchanged (0 on the first iteration), so 'sa' may
			 * not advance and the same bytes get re-examined.
			 * Callers are expected to pre-validate the families
			 * via sctp_connectx_helper_find() — confirm.
			 */
			break;
		}
		/* Step to the next packed sockaddr in the array. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6329
6330struct sctp_tcb *
6331sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6332    int *totaddr, int *num_v4, int *num_v6, int *error,
6333    int limit, int *bad_addr)
6334{
6335	struct sockaddr *sa;
6336	struct sctp_tcb *stcb = NULL;
6337	size_t incr, at, i;
6338
6339	at = incr = 0;
6340	sa = addr;
6341
6342	*error = *num_v6 = *num_v4 = 0;
6343	/* account and validate addresses */
6344	for (i = 0; i < (size_t)*totaddr; i++) {
6345		switch (sa->sa_family) {
6346#ifdef INET
6347		case AF_INET:
6348			(*num_v4) += 1;
6349			incr = sizeof(struct sockaddr_in);
6350			if (sa->sa_len != incr) {
6351				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6352				*error = EINVAL;
6353				*bad_addr = 1;
6354				return (NULL);
6355			}
6356			break;
6357#endif
6358#ifdef INET6
6359		case AF_INET6:
6360			{
6361				struct sockaddr_in6 *sin6;
6362
6363				sin6 = (struct sockaddr_in6 *)sa;
6364				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6365					/* Must be non-mapped for connectx */
6366					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6367					*error = EINVAL;
6368					*bad_addr = 1;
6369					return (NULL);
6370				}
6371				(*num_v6) += 1;
6372				incr = sizeof(struct sockaddr_in6);
6373				if (sa->sa_len != incr) {
6374					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6375					*error = EINVAL;
6376					*bad_addr = 1;
6377					return (NULL);
6378				}
6379				break;
6380			}
6381#endif
6382		default:
6383			*totaddr = i;
6384			/* we are done */
6385			break;
6386		}
6387		if (i == (size_t)*totaddr) {
6388			break;
6389		}
6390		SCTP_INP_INCR_REF(inp);
6391		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6392		if (stcb != NULL) {
6393			/* Already have or am bring up an association */
6394			return (stcb);
6395		} else {
6396			SCTP_INP_DECR_REF(inp);
6397		}
6398		if ((at + incr) > (size_t)limit) {
6399			*totaddr = i;
6400			break;
6401		}
6402		sa = (struct sockaddr *)((caddr_t)sa + incr);
6403	}
6404	return ((struct sctp_tcb *)NULL);
6405}
6406
6407/*
6408 * sctp_bindx(ADD) for one address.
6409 * assumes all arguments are valid/checked by caller.
6410 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* A bound-all endpoint cannot add individual addresses. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped addrs on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped v6 address to plain IPv4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 addrs on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* Endpoint not bound yet: do an ordinary bind instead. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/*
		 * validate the incoming port; the port field occupies the
		 * same offset in sockaddr_in and sockaddr_in6, so this
		 * cast reads it correctly for either family.
		 */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Another endpoint already owns this address/port. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6534
6535/*
6536 * sctp_bindx(DELETE) for one address.
6537 * assumes all arguments are valid/checked by caller.
6538 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* A bound-all endpoint cannot delete individual addresses. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped addrs on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped v6 address to plain IPv4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 addrs on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6619
6620/*
6621 * returns the valid local address count for an assoc, taking into account
6622 * all scoping rules
6623 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* Skip loopback ifns the assoc excludes. */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							/* not visible in this jail */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							/* not visible in this jail */
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6769
6770#if defined(SCTP_LOCAL_TRACE_BUF)
6771
6772void
6773sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6774{
6775	uint32_t saveindex, newindex;
6776
6777	do {
6778		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6779		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6780			newindex = 1;
6781		} else {
6782			newindex = saveindex + 1;
6783		}
6784	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6785	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6786		saveindex = 0;
6787	}
6788	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6789	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6790	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6791	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6792	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6793	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6794	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6795	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6796}
6797
6798#endif
/*
 * Input hook installed on the UDP tunneling sockets (see
 * sctp_over_udp_start()): strip the UDP encapsulation header from a
 * received packet and feed the inner SCTP packet into the normal SCTP
 * input path, carrying along the UDP source port.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port (left as found in the header, no byte swap) */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/* Shrink the recorded payload length by the removed UDP header */
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		/* Unknown IP version: drop the whole packet */
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6870
void
sctp_over_udp_stop(void)
{
	/*
	 * Tear down the SCTP-over-UDP tunneling sockets, if any exist.
	 *
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6891
6892int
6893sctp_over_udp_start(void)
6894{
6895	uint16_t port;
6896	int ret;
6897
6898#ifdef INET
6899	struct sockaddr_in sin;
6900
6901#endif
6902#ifdef INET6
6903	struct sockaddr_in6 sin6;
6904
6905#endif
6906	/*
6907	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6908	 * for writting!
6909	 */
6910	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6911	if (ntohs(port) == 0) {
6912		/* Must have a port set */
6913		return (EINVAL);
6914	}
6915#ifdef INET
6916	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6917		/* Already running -- must stop first */
6918		return (EALREADY);
6919	}
6920#endif
6921#ifdef INET6
6922	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6923		/* Already running -- must stop first */
6924		return (EALREADY);
6925	}
6926#endif
6927#ifdef INET
6928	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6929	    SOCK_DGRAM, IPPROTO_UDP,
6930	    curthread->td_ucred, curthread))) {
6931		sctp_over_udp_stop();
6932		return (ret);
6933	}
6934	/* Call the special UDP hook. */
6935	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6936	    sctp_recv_udp_tunneled_packet))) {
6937		sctp_over_udp_stop();
6938		return (ret);
6939	}
6940	/* Ok, we have a socket, bind it to the port. */
6941	memset(&sin, 0, sizeof(struct sockaddr_in));
6942	sin.sin_len = sizeof(struct sockaddr_in);
6943	sin.sin_family = AF_INET;
6944	sin.sin_port = htons(port);
6945	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
6946	    (struct sockaddr *)&sin, curthread))) {
6947		sctp_over_udp_stop();
6948		return (ret);
6949	}
6950#endif
6951#ifdef INET6
6952	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
6953	    SOCK_DGRAM, IPPROTO_UDP,
6954	    curthread->td_ucred, curthread))) {
6955		sctp_over_udp_stop();
6956		return (ret);
6957	}
6958	/* Call the special UDP hook. */
6959	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
6960	    sctp_recv_udp_tunneled_packet))) {
6961		sctp_over_udp_stop();
6962		return (ret);
6963	}
6964	/* Ok, we have a socket, bind it to the port. */
6965	memset(&sin6, 0, sizeof(struct sockaddr_in6));
6966	sin6.sin6_len = sizeof(struct sockaddr_in6);
6967	sin6.sin6_family = AF_INET6;
6968	sin6.sin6_port = htons(port);
6969	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
6970	    (struct sockaddr *)&sin6, curthread))) {
6971		sctp_over_udp_stop();
6972		return (ret);
6973	}
6974#endif
6975	return (0);
6976}
6977