sctputil.c revision 258454
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/netinet/sctputil.c 258454 2013-11-21 23:00:09Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/udp.h>
53#include <netinet/udp_var.h>
54#include <sys/proc.h>
55
56
57#ifndef KTR_SCTP
58#define KTR_SCTP KTR_SUBSYS
59#endif
60
61extern struct sctp_cc_functions sctp_cc_functions[];
62extern struct sctp_ss_functions sctp_ss_functions[];
63
64void
65sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66{
67	struct sctp_cwnd_log sctp_clog;
68
69	sctp_clog.x.sb.stcb = stcb;
70	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71	if (stcb)
72		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73	else
74		sctp_clog.x.sb.stcb_sbcc = 0;
75	sctp_clog.x.sb.incr = incr;
76	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77	    SCTP_LOG_EVENT_SB,
78	    from,
79	    sctp_clog.x.misc.log1,
80	    sctp_clog.x.misc.log2,
81	    sctp_clog.x.misc.log3,
82	    sctp_clog.x.misc.log4);
83}
84
85void
86sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87{
88	struct sctp_cwnd_log sctp_clog;
89
90	sctp_clog.x.close.inp = (void *)inp;
91	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92	if (stcb) {
93		sctp_clog.x.close.stcb = (void *)stcb;
94		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95	} else {
96		sctp_clog.x.close.stcb = 0;
97		sctp_clog.x.close.state = 0;
98	}
99	sctp_clog.x.close.loc = loc;
100	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101	    SCTP_LOG_EVENT_CLOSE,
102	    0,
103	    sctp_clog.x.misc.log1,
104	    sctp_clog.x.misc.log2,
105	    sctp_clog.x.misc.log3,
106	    sctp_clog.x.misc.log4);
107}
108
109void
110rto_logging(struct sctp_nets *net, int from)
111{
112	struct sctp_cwnd_log sctp_clog;
113
114	memset(&sctp_clog, 0, sizeof(sctp_clog));
115	sctp_clog.x.rto.net = (void *)net;
116	sctp_clog.x.rto.rtt = net->rtt / 1000;
117	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118	    SCTP_LOG_EVENT_RTT,
119	    from,
120	    sctp_clog.x.misc.log1,
121	    sctp_clog.x.misc.log2,
122	    sctp_clog.x.misc.log3,
123	    sctp_clog.x.misc.log4);
124}
125
126void
127sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128{
129	struct sctp_cwnd_log sctp_clog;
130
131	sctp_clog.x.strlog.stcb = stcb;
132	sctp_clog.x.strlog.n_tsn = tsn;
133	sctp_clog.x.strlog.n_sseq = sseq;
134	sctp_clog.x.strlog.e_tsn = 0;
135	sctp_clog.x.strlog.e_sseq = 0;
136	sctp_clog.x.strlog.strm = stream;
137	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138	    SCTP_LOG_EVENT_STRM,
139	    from,
140	    sctp_clog.x.misc.log1,
141	    sctp_clog.x.misc.log2,
142	    sctp_clog.x.misc.log3,
143	    sctp_clog.x.misc.log4);
144}
145
146void
147sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148{
149	struct sctp_cwnd_log sctp_clog;
150
151	sctp_clog.x.nagle.stcb = (void *)stcb;
152	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157	    SCTP_LOG_EVENT_NAGLE,
158	    action,
159	    sctp_clog.x.misc.log1,
160	    sctp_clog.x.misc.log2,
161	    sctp_clog.x.misc.log3,
162	    sctp_clog.x.misc.log4);
163}
164
165void
166sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167{
168	struct sctp_cwnd_log sctp_clog;
169
170	sctp_clog.x.sack.cumack = cumack;
171	sctp_clog.x.sack.oldcumack = old_cumack;
172	sctp_clog.x.sack.tsn = tsn;
173	sctp_clog.x.sack.numGaps = gaps;
174	sctp_clog.x.sack.numDups = dups;
175	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176	    SCTP_LOG_EVENT_SACK,
177	    from,
178	    sctp_clog.x.misc.log1,
179	    sctp_clog.x.misc.log2,
180	    sctp_clog.x.misc.log3,
181	    sctp_clog.x.misc.log4);
182}
183
184void
185sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186{
187	struct sctp_cwnd_log sctp_clog;
188
189	memset(&sctp_clog, 0, sizeof(sctp_clog));
190	sctp_clog.x.map.base = map;
191	sctp_clog.x.map.cum = cum;
192	sctp_clog.x.map.high = high;
193	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194	    SCTP_LOG_EVENT_MAP,
195	    from,
196	    sctp_clog.x.misc.log1,
197	    sctp_clog.x.misc.log2,
198	    sctp_clog.x.misc.log3,
199	    sctp_clog.x.misc.log4);
200}
201
202void
203sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204{
205	struct sctp_cwnd_log sctp_clog;
206
207	memset(&sctp_clog, 0, sizeof(sctp_clog));
208	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210	sctp_clog.x.fr.tsn = tsn;
211	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212	    SCTP_LOG_EVENT_FR,
213	    from,
214	    sctp_clog.x.misc.log1,
215	    sctp_clog.x.misc.log2,
216	    sctp_clog.x.misc.log3,
217	    sctp_clog.x.misc.log4);
218}
219
220void
221sctp_log_mb(struct mbuf *m, int from)
222{
223	struct sctp_cwnd_log sctp_clog;
224
225	sctp_clog.x.mb.mp = m;
226	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
227	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
228	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
229	if (SCTP_BUF_IS_EXTENDED(m)) {
230		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
231		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
232	} else {
233		sctp_clog.x.mb.ext = 0;
234		sctp_clog.x.mb.refcnt = 0;
235	}
236	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
237	    SCTP_LOG_EVENT_MBUF,
238	    from,
239	    sctp_clog.x.misc.log1,
240	    sctp_clog.x.misc.log2,
241	    sctp_clog.x.misc.log3,
242	    sctp_clog.x.misc.log4);
243}
244
245void
246sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
247{
248	struct sctp_cwnd_log sctp_clog;
249
250	if (control == NULL) {
251		SCTP_PRINTF("Gak log of NULL?\n");
252		return;
253	}
254	sctp_clog.x.strlog.stcb = control->stcb;
255	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257	sctp_clog.x.strlog.strm = control->sinfo_stream;
258	if (poschk != NULL) {
259		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261	} else {
262		sctp_clog.x.strlog.e_tsn = 0;
263		sctp_clog.x.strlog.e_sseq = 0;
264	}
265	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266	    SCTP_LOG_EVENT_STRM,
267	    from,
268	    sctp_clog.x.misc.log1,
269	    sctp_clog.x.misc.log2,
270	    sctp_clog.x.misc.log3,
271	    sctp_clog.x.misc.log4);
272}
273
274void
275sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276{
277	struct sctp_cwnd_log sctp_clog;
278
279	sctp_clog.x.cwnd.net = net;
280	if (stcb->asoc.send_queue_cnt > 255)
281		sctp_clog.x.cwnd.cnt_in_send = 255;
282	else
283		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284	if (stcb->asoc.stream_queue_cnt > 255)
285		sctp_clog.x.cwnd.cnt_in_str = 255;
286	else
287		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288
289	if (net) {
290		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291		sctp_clog.x.cwnd.inflight = net->flight_size;
292		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295	}
296	if (SCTP_CWNDLOG_PRESEND == from) {
297		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298	}
299	sctp_clog.x.cwnd.cwnd_augment = augment;
300	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301	    SCTP_LOG_EVENT_CWND,
302	    from,
303	    sctp_clog.x.misc.log1,
304	    sctp_clog.x.misc.log2,
305	    sctp_clog.x.misc.log3,
306	    sctp_clog.x.misc.log4);
307}
308
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	/*
	 * Trace the ownership state of every SCTP-related lock for this
	 * endpoint/association: TCB mutex, inp mutex, create mutex, the
	 * global ep rwlock and the socket send/receive buffer mutexes.
	 * Both 'inp' and 'stcb' may be NULL; unknown states are logged
	 * as SCTP_LOCK_UNKNOWN.
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* write-ownership of the global endpoint info lock */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both query
		 * so_rcv.sb_mtx — presumably intentional (the socket has
		 * no separate top-level mutex here), but worth confirming
		 * against other FreeBSD versions.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
352
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	/*
	 * Trace a max-burst limit event, reusing the cwnd record layout:
	 * cwnd_new_value carries 'error', cwnd_augment carries 'burst'.
	 * Unlike sctp_log_cwnd(), 'net' is dereferenced without a NULL
	 * check (flight_size below), so callers must pass a valid net.
	 * 'stcb' must be non-NULL as well.
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* queue counters are clamped to fit the 8-bit log fields */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
379
380void
381sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382{
383	struct sctp_cwnd_log sctp_clog;
384
385	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386	sctp_clog.x.rwnd.send_size = snd_size;
387	sctp_clog.x.rwnd.overhead = overhead;
388	sctp_clog.x.rwnd.new_rwnd = 0;
389	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390	    SCTP_LOG_EVENT_RWND,
391	    from,
392	    sctp_clog.x.misc.log1,
393	    sctp_clog.x.misc.log2,
394	    sctp_clog.x.misc.log3,
395	    sctp_clog.x.misc.log4);
396}
397
398void
399sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400{
401	struct sctp_cwnd_log sctp_clog;
402
403	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404	sctp_clog.x.rwnd.send_size = flight_size;
405	sctp_clog.x.rwnd.overhead = overhead;
406	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408	    SCTP_LOG_EVENT_RWND,
409	    from,
410	    sctp_clog.x.misc.log1,
411	    sctp_clog.x.misc.log2,
412	    sctp_clog.x.misc.log3,
413	    sctp_clog.x.misc.log4);
414}
415
416void
417sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418{
419	struct sctp_cwnd_log sctp_clog;
420
421	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422	sctp_clog.x.mbcnt.size_change = book;
423	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426	    SCTP_LOG_EVENT_MBCNT,
427	    from,
428	    sctp_clog.x.misc.log1,
429	    sctp_clog.x.misc.log2,
430	    sctp_clog.x.misc.log3,
431	    sctp_clog.x.misc.log4);
432}
433
/*
 * Emit a generic trace record: four caller-supplied 32-bit values,
 * tagged SCTP_LOG_MISC_EVENT with caller id 'from'.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442
443void
444sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445{
446	struct sctp_cwnd_log sctp_clog;
447
448	sctp_clog.x.wake.stcb = (void *)stcb;
449	sctp_clog.x.wake.wake_cnt = wake_cnt;
450	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453
454	if (stcb->asoc.stream_queue_cnt < 0xff)
455		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456	else
457		sctp_clog.x.wake.stream_qcnt = 0xff;
458
459	if (stcb->asoc.chunks_on_out_queue < 0xff)
460		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461	else
462		sctp_clog.x.wake.chunks_on_oque = 0xff;
463
464	sctp_clog.x.wake.sctpflags = 0;
465	/* set in the defered mode stuff */
466	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467		sctp_clog.x.wake.sctpflags |= 1;
468	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469		sctp_clog.x.wake.sctpflags |= 2;
470	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471		sctp_clog.x.wake.sctpflags |= 4;
472	/* what about the sb */
473	if (stcb->sctp_socket) {
474		struct socket *so = stcb->sctp_socket;
475
476		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477	} else {
478		sctp_clog.x.wake.sbflags = 0xff;
479	}
480	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481	    SCTP_LOG_EVENT_WAKE,
482	    from,
483	    sctp_clog.x.misc.log1,
484	    sctp_clog.x.misc.log2,
485	    sctp_clog.x.misc.log3,
486	    sctp_clog.x.misc.log4);
487}
488
489void
490sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491{
492	struct sctp_cwnd_log sctp_clog;
493
494	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500	sctp_clog.x.blk.sndlen = sendlen;
501	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502	    SCTP_LOG_EVENT_BLOCK,
503	    from,
504	    sctp_clog.x.misc.log1,
505	    sctp_clog.x.misc.log2,
506	    sctp_clog.x.misc.log3,
507	    sctp_clog.x.misc.log4);
508}
509
/*
 * Sockopt hook for retrieving the stat log; with KTR-based tracing it
 * fills nothing and always reports success.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516
517#ifdef SCTP_AUDITING_ENABLED
518uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
519static int sctp_audit_indx = 0;
520
521static
522void
523sctp_print_audit_report(void)
524{
525	int i;
526	int cnt;
527
528	cnt = 0;
529	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530		if ((sctp_audit_data[i][0] == 0xe0) &&
531		    (sctp_audit_data[i][1] == 0x01)) {
532			cnt = 0;
533			SCTP_PRINTF("\n");
534		} else if (sctp_audit_data[i][0] == 0xf0) {
535			cnt = 0;
536			SCTP_PRINTF("\n");
537		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538		    (sctp_audit_data[i][1] == 0x01)) {
539			SCTP_PRINTF("\n");
540			cnt = 0;
541		}
542		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543		    (uint32_t) sctp_audit_data[i][1]);
544		cnt++;
545		if ((cnt % 14) == 0)
546			SCTP_PRINTF("\n");
547	}
548	for (i = 0; i < sctp_audit_indx; i++) {
549		if ((sctp_audit_data[i][0] == 0xe0) &&
550		    (sctp_audit_data[i][1] == 0x01)) {
551			cnt = 0;
552			SCTP_PRINTF("\n");
553		} else if (sctp_audit_data[i][0] == 0xf0) {
554			cnt = 0;
555			SCTP_PRINTF("\n");
556		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557		    (sctp_audit_data[i][1] == 0x01)) {
558			SCTP_PRINTF("\n");
559			cnt = 0;
560		}
561		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562		    (uint32_t) sctp_audit_data[i][1]);
563		cnt++;
564		if ((cnt % 14) == 0)
565			SCTP_PRINTF("\n");
566	}
567	SCTP_PRINTF("\n");
568}
569
/*
 * Consistency audit of an association's retransmit/flight accounting.
 * Drops marker entries into the audit ring and cross-checks three
 * invariants against the sent queue:
 *   1. sent_queue_retran_cnt == number of chunks marked RESEND,
 *   2. total_flight == sum of book_size of in-flight chunks,
 *   3. total_flight == sum of per-net flight_size.
 * Any mismatch is reported AND corrected in place, and a full audit
 * report is printed.  The 'net' argument is not used by this function.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marker: audit entered; low byte identifies the caller. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint — nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association — nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1 marker: retransmit count as the association believes it. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recompute retransmit count and in-flight totals from scratch. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* Invariant 1: the cached retransmit counter must match. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		/* corrective action: trust the recomputed value */
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* Invariant 2: total_flight must equal the booked bytes. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* Invariant 2b: the in-flight chunk count must match too. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Invariant 3: per-destination flight sizes must sum to total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
699
700void
701sctp_audit_log(uint8_t ev, uint8_t fd)
702{
703
704	sctp_audit_data[sctp_audit_indx][0] = ev;
705	sctp_audit_data[sctp_audit_indx][1] = fd;
706	sctp_audit_indx++;
707	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708		sctp_audit_indx = 0;
709	}
710}
711
712#endif
713
714/*
715 * sctp_stop_timers_for_shutdown() should be called
716 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717 * state to make sure that all timers are stopped.
718 */
719void
720sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721{
722	struct sctp_association *asoc;
723	struct sctp_nets *net;
724
725	asoc = &stcb->asoc;
726
727	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735	}
736}
737
738/*
739 * a list of sizes based on typical mtu's, used only if next hop size not
740 * returned.
741 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly smaller than val.  Values at
 * or below the smallest table entry are returned unchanged; values
 * above the largest entry yield the largest entry.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;
	uint32_t limit;

	limit = (uint32_t) (sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]));
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	idx = 1;
	while ((idx < limit) && (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}
782
783/*
784 * Return the smallest MTU larger than val. If there is no
785 * entry, just return val.
786 */
787uint32_t
788sctp_get_next_mtu(uint32_t val)
789{
790	/* select another MTU that is just bigger than this one */
791	uint32_t i;
792
793	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
794		if (val < sctp_mtu_sizes[i]) {
795			return (sctp_mtu_sizes[i]);
796		}
797	}
798	return (val);
799}
800
/*
 * Refill the endpoint's random_store by HMACing the endpoint's random
 * numbers with a monotonically increasing counter, then reset the
 * read cursor (store_at) to 0.  Deliberately lock-free; see comment.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* key = random_numbers, message = counter, digest -> random_store */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* bump so the next refill produces a different digest */
	m->random_counter++;
}
819
/*
 * Hand out the next 32-bit pseudo-random value from the endpoint's
 * random_store, refilling the store when the cursor wraps.  If the
 * sysctl-driven initial_sequence_debug is set, returns a simple
 * incrementing sequence instead (deterministic, for debugging).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Lock-free claim of a 4-byte slot in the store: advance the
	 * cursor with a compare-and-set and retry on contention.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads a uint32_t through a cast from the byte
	 * array — assumes store_at keeps the pointer suitably aligned
	 * and the platform tolerates this access pattern; confirm.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
857
858uint32_t
859sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860{
861	uint32_t x;
862	struct timeval now;
863
864	if (check) {
865		(void)SCTP_GETTIME_TIMEVAL(&now);
866	}
867	for (;;) {
868		x = sctp_select_initial_TSN(&inp->sctp_ep);
869		if (x == 0) {
870			/* we never use 0 */
871			continue;
872		}
873		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874			break;
875		}
876	}
877	return (x);
878}
879
880int
881sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
882    uint32_t override_tag, uint32_t vrf_id)
883{
884	struct sctp_association *asoc;
885
886	/*
887	 * Anything set to zero is taken care of by the allocation routine's
888	 * bzero
889	 */
890
891	/*
892	 * Up front select what scoping to apply on addresses I tell my peer
893	 * Not sure what to do with these right now, we will need to come up
894	 * with a way to set them. We may need to pass them through from the
895	 * caller in the sctp_aloc_assoc() function.
896	 */
897	int i;
898
899	asoc = &stcb->asoc;
900	/* init all variables to a known value. */
901	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902	asoc->max_burst = inp->sctp_ep.max_burst;
903	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
904	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
906	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
907	asoc->ecn_allowed = inp->sctp_ecn_enable;
908	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
909	asoc->sctp_cmt_pf = (uint8_t) 0;
910	asoc->sctp_frag_point = inp->sctp_frag_point;
911	asoc->sctp_features = inp->sctp_features;
912	asoc->default_dscp = inp->sctp_ep.default_dscp;
913#ifdef INET6
914	if (inp->sctp_ep.default_flowlabel) {
915		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
916	} else {
917		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
918			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
919			asoc->default_flowlabel &= 0x000fffff;
920			asoc->default_flowlabel |= 0x80000000;
921		} else {
922			asoc->default_flowlabel = 0;
923		}
924	}
925#endif
926	asoc->sb_send_resv = 0;
927	if (override_tag) {
928		asoc->my_vtag = override_tag;
929	} else {
930		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
931	}
932	/* Get the nonce tags */
933	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
934	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
935	asoc->vrf_id = vrf_id;
936
937#ifdef SCTP_ASOCLOG_OF_TSNS
938	asoc->tsn_in_at = 0;
939	asoc->tsn_out_at = 0;
940	asoc->tsn_in_wrapped = 0;
941	asoc->tsn_out_wrapped = 0;
942	asoc->cumack_log_at = 0;
943	asoc->cumack_log_atsnt = 0;
944#endif
945#ifdef SCTP_FS_SPEC_LOG
946	asoc->fs_index = 0;
947#endif
948	asoc->refcnt = 0;
949	asoc->assoc_up_sent = 0;
950	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
951	    sctp_select_initial_TSN(&inp->sctp_ep);
952	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
953	/* we are optimisitic here */
954	asoc->peer_supports_pktdrop = 1;
955	asoc->peer_supports_nat = 0;
956	asoc->sent_queue_retran_cnt = 0;
957
958	/* for CMT */
959	asoc->last_net_cmt_send_started = NULL;
960
961	/* This will need to be adjusted */
962	asoc->last_acked_seq = asoc->init_seq_number - 1;
963	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
964	asoc->asconf_seq_in = asoc->last_acked_seq;
965
966	/* here we are different, we hold the next one we expect */
967	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
968
969	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
970	asoc->initial_rto = inp->sctp_ep.initial_rto;
971
972	asoc->max_init_times = inp->sctp_ep.max_init_times;
973	asoc->max_send_times = inp->sctp_ep.max_send_times;
974	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
975	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
976	asoc->free_chunk_cnt = 0;
977
978	asoc->iam_blocking = 0;
979	asoc->context = inp->sctp_context;
980	asoc->local_strreset_support = inp->local_strreset_support;
981	asoc->def_send = inp->def_send;
982	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
984	asoc->pr_sctp_cnt = 0;
985	asoc->total_output_queue_size = 0;
986
987	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988		asoc->scope.ipv6_addr_legal = 1;
989		if (SCTP_IPV6_V6ONLY(inp) == 0) {
990			asoc->scope.ipv4_addr_legal = 1;
991		} else {
992			asoc->scope.ipv4_addr_legal = 0;
993		}
994	} else {
995		asoc->scope.ipv6_addr_legal = 0;
996		asoc->scope.ipv4_addr_legal = 1;
997	}
998
999	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1000	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1001
1002	asoc->smallest_mtu = inp->sctp_frag_point;
1003	asoc->minrto = inp->sctp_ep.sctp_minrto;
1004	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1005
1006	asoc->locked_on_sending = NULL;
1007	asoc->stream_locked_on = 0;
1008	asoc->ecn_echo_cnt_onq = 0;
1009	asoc->stream_locked = 0;
1010
1011	asoc->send_sack = 1;
1012
1013	LIST_INIT(&asoc->sctp_restricted_addrs);
1014
1015	TAILQ_INIT(&asoc->nets);
1016	TAILQ_INIT(&asoc->pending_reply_queue);
1017	TAILQ_INIT(&asoc->asconf_ack_sent);
1018	/* Setup to fill the hb random cache at first HB */
1019	asoc->hb_random_idx = 4;
1020
1021	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1022
1023	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1024	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1025
1026	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1027	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1028
1029	/*
1030	 * Now the stream parameters, here we allocate space for all streams
1031	 * that we request by default.
1032	 */
1033	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1034	    inp->sctp_ep.pre_open_stream_count;
1035	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1036	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1037	    SCTP_M_STRMO);
1038	if (asoc->strmout == NULL) {
1039		/* big trouble no memory */
1040		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1041		return (ENOMEM);
1042	}
1043	for (i = 0; i < asoc->streamoutcnt; i++) {
1044		/*
1045		 * inbound side must be set to 0xffff, also NOTE when we get
1046		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1047		 * count (streamoutcnt) but first check if we sent to any of
1048		 * the upper streams that were dropped (if some were). Those
1049		 * that were dropped must be notified to the upper layer as
1050		 * failed to send.
1051		 */
1052		asoc->strmout[i].next_sequence_send = 0x0;
1053		TAILQ_INIT(&asoc->strmout[i].outqueue);
1054		asoc->strmout[i].chunks_on_queues = 0;
1055		asoc->strmout[i].stream_no = i;
1056		asoc->strmout[i].last_msg_incomplete = 0;
1057		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1058	}
1059	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1060
1061	/* Now the mapping array */
1062	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1063	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1064	    SCTP_M_MAP);
1065	if (asoc->mapping_array == NULL) {
1066		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1067		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1068		return (ENOMEM);
1069	}
1070	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1071	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1072	    SCTP_M_MAP);
1073	if (asoc->nr_mapping_array == NULL) {
1074		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1076		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1077		return (ENOMEM);
1078	}
1079	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1080
1081	/* Now the init of the other outqueues */
1082	TAILQ_INIT(&asoc->free_chunks);
1083	TAILQ_INIT(&asoc->control_send_queue);
1084	TAILQ_INIT(&asoc->asconf_send_queue);
1085	TAILQ_INIT(&asoc->send_queue);
1086	TAILQ_INIT(&asoc->sent_queue);
1087	TAILQ_INIT(&asoc->reasmqueue);
1088	TAILQ_INIT(&asoc->resetHead);
1089	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1090	TAILQ_INIT(&asoc->asconf_queue);
1091	/* authentication fields */
1092	asoc->authinfo.random = NULL;
1093	asoc->authinfo.active_keyid = 0;
1094	asoc->authinfo.assoc_key = NULL;
1095	asoc->authinfo.assoc_keyid = 0;
1096	asoc->authinfo.recv_key = NULL;
1097	asoc->authinfo.recv_keyid = 0;
1098	LIST_INIT(&asoc->shared_keys);
1099	asoc->marked_retrans = 0;
1100	asoc->port = inp->sctp_ep.port;
1101	asoc->timoinit = 0;
1102	asoc->timodata = 0;
1103	asoc->timosack = 0;
1104	asoc->timoshutdown = 0;
1105	asoc->timoheartbeat = 0;
1106	asoc->timocookie = 0;
1107	asoc->timoshutdownack = 0;
1108	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1109	asoc->discontinuity_time = asoc->start_time;
1110	/*
1111	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1112	 * freed later when the association is freed.
1113	 */
1114	return (0);
1115}
1116
1117void
1118sctp_print_mapping_array(struct sctp_association *asoc)
1119{
1120	unsigned int i, limit;
1121
1122	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1123	    asoc->mapping_array_size,
1124	    asoc->mapping_array_base_tsn,
1125	    asoc->cumulative_tsn,
1126	    asoc->highest_tsn_inside_map,
1127	    asoc->highest_tsn_inside_nr_map);
1128	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1129		if (asoc->mapping_array[limit - 1] != 0) {
1130			break;
1131		}
1132	}
1133	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1134	for (i = 0; i < limit; i++) {
1135		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1136	}
1137	if (limit % 16)
1138		SCTP_PRINTF("\n");
1139	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1140		if (asoc->nr_mapping_array[limit - 1]) {
1141			break;
1142		}
1143	}
1144	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1145	for (i = 0; i < limit; i++) {
1146		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1147	}
1148	if (limit % 16)
1149		SCTP_PRINTF("\n");
1150}
1151
1152int
1153sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1154{
1155	/* mapping array needs to grow */
1156	uint8_t *new_array1, *new_array2;
1157	uint32_t new_size;
1158
1159	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1160	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1161	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1162	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1163		/* can't get more, forget it */
1164		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1165		if (new_array1) {
1166			SCTP_FREE(new_array1, SCTP_M_MAP);
1167		}
1168		if (new_array2) {
1169			SCTP_FREE(new_array2, SCTP_M_MAP);
1170		}
1171		return (-1);
1172	}
1173	memset(new_array1, 0, new_size);
1174	memset(new_array2, 0, new_size);
1175	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1176	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1177	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1178	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1179	asoc->mapping_array = new_array1;
1180	asoc->nr_mapping_array = new_array2;
1181	asoc->mapping_array_size = new_size;
1182	return (0);
1183}
1184
1185
/*
 * Core of the association iterator: walk endpoints (and each endpoint's
 * associations) that match the iterator's flag/feature/state filters,
 * invoking the registered callbacks.  Runs with the INP-INFO read lock
 * and the ITERATOR lock held; both are dropped (and the "it" structure
 * freed) on the done_with_iterator path, and temporarily released every
 * SCTP_ITERATOR_MAX_AT_ONCE associations to let other threads in.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/*
		 * Common exit: release locks, run the at-end callback,
		 * and free the iterator itself.  "it" is invalid after
		 * this point.
		 */
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* hand-over-hand: lock the next endpoint before unlocking this one */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* per-endpoint callback asked to skip, or no assocs here */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* hold a refcount so the assoc survives the unlocked window */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* someone may have asked us to stop while we slept */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* re-acquire in the original order and drop the temp refs */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1333
/*
 * Drain the global iterator work queue, running each queued iterator
 * via sctp_iterator_work().  The workqueue lock is dropped around each
 * iterator (so work can be queued concurrently) and re-taken before
 * advancing.  sctp_iterator_work() frees the iterator it is handed, so
 * "it" must not be referenced after the call returns.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	/* _SAFE form: "nit" is cached before "it" is removed and freed */
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		/* run in the vnet the iterator was created in */
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1357
1358
1359static void
1360sctp_handle_addr_wq(void)
1361{
1362	/* deal with the ADDR wq from the rtsock calls */
1363	struct sctp_laddr *wi, *nwi;
1364	struct sctp_asconf_iterator *asc;
1365
1366	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1367	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1368	if (asc == NULL) {
1369		/* Try later, no memory */
1370		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1371		    (struct sctp_inpcb *)NULL,
1372		    (struct sctp_tcb *)NULL,
1373		    (struct sctp_nets *)NULL);
1374		return;
1375	}
1376	LIST_INIT(&asc->list_of_work);
1377	asc->cnt = 0;
1378
1379	SCTP_WQ_ADDR_LOCK();
1380	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1381		LIST_REMOVE(wi, sctp_nxt_addr);
1382		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1383		asc->cnt++;
1384	}
1385	SCTP_WQ_ADDR_UNLOCK();
1386
1387	if (asc->cnt == 0) {
1388		SCTP_FREE(asc, SCTP_M_ASC_IT);
1389	} else {
1390		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1391		    sctp_asconf_iterator_stcb,
1392		    NULL,	/* No ep end for boundall */
1393		    SCTP_PCB_FLAGS_BOUNDALL,
1394		    SCTP_PCB_ANY_FEATURES,
1395		    SCTP_ASOC_ANY_STATE,
1396		    (void *)asc, 0,
1397		    sctp_asconf_iterator_end, NULL, 0);
1398	}
1399}
1400
/*
 * Common expiry handler for every SCTP timer type (callout callback).
 * The opaque argument is the struct sctp_timer that fired; it carries
 * the endpoint (ep), association (tcb) and destination (net) the timer
 * was armed against.  A sequence of sanity checks weeds out stale,
 * stopped, or rescheduled callouts before dispatching on tmr->type.
 * References taken on inp/stcb during the checks are released on the
 * out_decr/get_out paths; the ASOCKILL and INPKILL cases free their
 * object and skip the release via out_no_decr.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer mismatch means the timer struct is stale/corrupt */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/* stopped_from values 0xa001..0xa006 are breadcrumbs marking how
	 * far the sanity checks progressed before bailing out */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* cache the type now: on the KILL paths below, tmr's memory may
	 * be released before the final debug log */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * An endpoint without a socket only still services its
		 * teardown-related timers; anything else is ignored.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold a reference so the assoc cannot vanish under us */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped between firing and us running */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* take the TCB lock, then drop the temporary reference */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* only ASOCKILL may run on a dying/dead association */
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* re-arm only if HB is still enabled on this destination */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's secret keys used for cookie signing */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* shutdown took too long: abort the association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* time to free the association itself */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* time to free the endpoint itself */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* release the TCB lock (if still held) ... */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* ... and the endpoint reference taken above */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	/* use the cached "type": tmr may already be invalid here */
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1840
1841void
1842sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1843    struct sctp_nets *net)
1844{
1845	uint32_t to_ticks;
1846	struct sctp_timer *tmr;
1847
1848	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1849		return;
1850
1851	tmr = NULL;
1852	if (stcb) {
1853		SCTP_TCB_LOCK_ASSERT(stcb);
1854	}
1855	switch (t_type) {
1856	case SCTP_TIMER_TYPE_ZERO_COPY:
1857		tmr = &inp->sctp_ep.zero_copy_timer;
1858		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1859		break;
1860	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1861		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1862		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1863		break;
1864	case SCTP_TIMER_TYPE_ADDR_WQ:
1865		/* Only 1 tick away :-) */
1866		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1867		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1868		break;
1869	case SCTP_TIMER_TYPE_SEND:
1870		/* Here we use the RTO timer */
1871		{
1872			int rto_val;
1873
1874			if ((stcb == NULL) || (net == NULL)) {
1875				return;
1876			}
1877			tmr = &net->rxt_timer;
1878			if (net->RTO == 0) {
1879				rto_val = stcb->asoc.initial_rto;
1880			} else {
1881				rto_val = net->RTO;
1882			}
1883			to_ticks = MSEC_TO_TICKS(rto_val);
1884		}
1885		break;
1886	case SCTP_TIMER_TYPE_INIT:
1887		/*
1888		 * Here we use the INIT timer default usually about 1
1889		 * minute.
1890		 */
1891		if ((stcb == NULL) || (net == NULL)) {
1892			return;
1893		}
1894		tmr = &net->rxt_timer;
1895		if (net->RTO == 0) {
1896			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1897		} else {
1898			to_ticks = MSEC_TO_TICKS(net->RTO);
1899		}
1900		break;
1901	case SCTP_TIMER_TYPE_RECV:
1902		/*
1903		 * Here we use the Delayed-Ack timer value from the inp
1904		 * ususually about 200ms.
1905		 */
1906		if (stcb == NULL) {
1907			return;
1908		}
1909		tmr = &stcb->asoc.dack_timer;
1910		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1911		break;
1912	case SCTP_TIMER_TYPE_SHUTDOWN:
1913		/* Here we use the RTO of the destination. */
1914		if ((stcb == NULL) || (net == NULL)) {
1915			return;
1916		}
1917		if (net->RTO == 0) {
1918			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1919		} else {
1920			to_ticks = MSEC_TO_TICKS(net->RTO);
1921		}
1922		tmr = &net->rxt_timer;
1923		break;
1924	case SCTP_TIMER_TYPE_HEARTBEAT:
1925		/*
1926		 * the net is used here so that we can add in the RTO. Even
1927		 * though we use a different timer. We also add the HB timer
1928		 * PLUS a random jitter.
1929		 */
1930		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1931			return;
1932		} else {
1933			uint32_t rndval;
1934			uint32_t jitter;
1935
1936			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1937			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1938				return;
1939			}
1940			if (net->RTO == 0) {
1941				to_ticks = stcb->asoc.initial_rto;
1942			} else {
1943				to_ticks = net->RTO;
1944			}
1945			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1946			jitter = rndval % to_ticks;
1947			if (jitter >= (to_ticks >> 1)) {
1948				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1949			} else {
1950				to_ticks = to_ticks - jitter;
1951			}
1952			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1953			    !(net->dest_state & SCTP_ADDR_PF)) {
1954				to_ticks += net->heart_beat_delay;
1955			}
1956			/*
1957			 * Now we must convert the to_ticks that are now in
1958			 * ms to ticks.
1959			 */
1960			to_ticks = MSEC_TO_TICKS(to_ticks);
1961			tmr = &net->hb_timer;
1962		}
1963		break;
1964	case SCTP_TIMER_TYPE_COOKIE:
1965		/*
1966		 * Here we can use the RTO timer from the network since one
1967		 * RTT was compelete. If a retran happened then we will be
1968		 * using the RTO initial value.
1969		 */
1970		if ((stcb == NULL) || (net == NULL)) {
1971			return;
1972		}
1973		if (net->RTO == 0) {
1974			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1975		} else {
1976			to_ticks = MSEC_TO_TICKS(net->RTO);
1977		}
1978		tmr = &net->rxt_timer;
1979		break;
1980	case SCTP_TIMER_TYPE_NEWCOOKIE:
1981		/*
1982		 * nothing needed but the endpoint here ususually about 60
1983		 * minutes.
1984		 */
1985		if (inp == NULL) {
1986			return;
1987		}
1988		tmr = &inp->sctp_ep.signature_change;
1989		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1990		break;
1991	case SCTP_TIMER_TYPE_ASOCKILL:
1992		if (stcb == NULL) {
1993			return;
1994		}
1995		tmr = &stcb->asoc.strreset_timer;
1996		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1997		break;
1998	case SCTP_TIMER_TYPE_INPKILL:
1999		/*
2000		 * The inp is setup to die. We re-use the signature_chage
2001		 * timer since that has stopped and we are in the GONE
2002		 * state.
2003		 */
2004		if (inp == NULL) {
2005			return;
2006		}
2007		tmr = &inp->sctp_ep.signature_change;
2008		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2009		break;
2010	case SCTP_TIMER_TYPE_PATHMTURAISE:
2011		/*
2012		 * Here we use the value found in the EP for PMTU ususually
2013		 * about 10 minutes.
2014		 */
2015		if ((stcb == NULL) || (inp == NULL)) {
2016			return;
2017		}
2018		if (net == NULL) {
2019			return;
2020		}
2021		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2022			return;
2023		}
2024		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2025		tmr = &net->pmtu_timer;
2026		break;
2027	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2028		/* Here we use the RTO of the destination */
2029		if ((stcb == NULL) || (net == NULL)) {
2030			return;
2031		}
2032		if (net->RTO == 0) {
2033			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2034		} else {
2035			to_ticks = MSEC_TO_TICKS(net->RTO);
2036		}
2037		tmr = &net->rxt_timer;
2038		break;
2039	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2040		/*
2041		 * Here we use the endpoints shutdown guard timer usually
2042		 * about 3 minutes.
2043		 */
2044		if ((inp == NULL) || (stcb == NULL)) {
2045			return;
2046		}
2047		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2048		tmr = &stcb->asoc.shut_guard_timer;
2049		break;
2050	case SCTP_TIMER_TYPE_STRRESET:
2051		/*
2052		 * Here the timer comes from the stcb but its value is from
2053		 * the net's RTO.
2054		 */
2055		if ((stcb == NULL) || (net == NULL)) {
2056			return;
2057		}
2058		if (net->RTO == 0) {
2059			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2060		} else {
2061			to_ticks = MSEC_TO_TICKS(net->RTO);
2062		}
2063		tmr = &stcb->asoc.strreset_timer;
2064		break;
2065	case SCTP_TIMER_TYPE_ASCONF:
2066		/*
2067		 * Here the timer comes from the stcb but its value is from
2068		 * the net's RTO.
2069		 */
2070		if ((stcb == NULL) || (net == NULL)) {
2071			return;
2072		}
2073		if (net->RTO == 0) {
2074			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2075		} else {
2076			to_ticks = MSEC_TO_TICKS(net->RTO);
2077		}
2078		tmr = &stcb->asoc.asconf_timer;
2079		break;
2080	case SCTP_TIMER_TYPE_PRIM_DELETED:
2081		if ((stcb == NULL) || (net != NULL)) {
2082			return;
2083		}
2084		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2085		tmr = &stcb->asoc.delete_prim_timer;
2086		break;
2087	case SCTP_TIMER_TYPE_AUTOCLOSE:
2088		if (stcb == NULL) {
2089			return;
2090		}
2091		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2092			/*
2093			 * Really an error since stcb is NOT set to
2094			 * autoclose
2095			 */
2096			return;
2097		}
2098		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2099		tmr = &stcb->asoc.autoclose_timer;
2100		break;
2101	default:
2102		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2103		    __FUNCTION__, t_type);
2104		return;
2105		break;
2106	}
2107	if ((to_ticks <= 0) || (tmr == NULL)) {
2108		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2109		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2110		return;
2111	}
2112	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2113		/*
2114		 * we do NOT allow you to have it already running. if it is
2115		 * we leave the current one up unchanged
2116		 */
2117		return;
2118	}
2119	/* At this point we can proceed */
2120	if (t_type == SCTP_TIMER_TYPE_SEND) {
2121		stcb->asoc.num_send_timers_up++;
2122	}
2123	tmr->stopped_from = 0;
2124	tmr->type = t_type;
2125	tmr->ep = (void *)inp;
2126	tmr->tcb = (void *)stcb;
2127	tmr->net = (void *)net;
2128	tmr->self = (void *)tmr;
2129	tmr->vnet = (void *)curvnet;
2130	tmr->ticks = sctp_get_tick_count();
2131	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2132	return;
2133}
2134
/*
 * Stop a running SCTP timer of the given type.  Depending on t_type the
 * timer lives in the endpoint (inp), the association (stcb) or the
 * destination (net); missing owners cause an early return.  'from' is a
 * caller-location code recorded in the timer for debugging.  If the timer
 * slot is currently in use by a different timer type (joint-use slots such
 * as net->rxt_timer), the timer is left running.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Locate the timer structure that holds this timer type. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the per-assoc count of running SEND timers in sync. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2291
2292uint32_t
2293sctp_calculate_len(struct mbuf *m)
2294{
2295	uint32_t tlen = 0;
2296	struct mbuf *at;
2297
2298	at = m;
2299	while (at) {
2300		tlen += SCTP_BUF_LEN(at);
2301		at = SCTP_BUF_NEXT(at);
2302	}
2303	return (tlen);
2304}
2305
2306void
2307sctp_mtu_size_reset(struct sctp_inpcb *inp,
2308    struct sctp_association *asoc, uint32_t mtu)
2309{
2310	/*
2311	 * Reset the P-MTU size on this association, this involves changing
2312	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2313	 * allow the DF flag to be cleared.
2314	 */
2315	struct sctp_tmit_chunk *chk;
2316	unsigned int eff_mtu, ovh;
2317
2318	asoc->smallest_mtu = mtu;
2319	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2320		ovh = SCTP_MIN_OVERHEAD;
2321	} else {
2322		ovh = SCTP_MIN_V4_OVERHEAD;
2323	}
2324	eff_mtu = mtu - ovh;
2325	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2326		if (chk->send_size > eff_mtu) {
2327			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2328		}
2329	}
2330	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2331		if (chk->send_size > eff_mtu) {
2332			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2333		}
2334	}
2335}
2336
2337
2338/*
2339 * given an association and starting time of the current RTT period return
2340 * RTO in number of msecs net should point to the current network
2341 */
2342
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 * Updates net->rtt (us), net->lastsa/lastsv (scaled SRTT/RTTVAR)
	 * and the association's satellite-network heuristics as a side
	 * effect.  Returns 0 on a bad 'safe' argument.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (avoid unaligned access to *told) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since 'old' */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the error term (sample - srtt) */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt with the sample */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* never allow a zero variance */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2458
2459/*
2460 * return a pointer to a contiguous piece of data from the given mbuf chain
2461 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2462 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2463 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2464 */
2465caddr_t
2466sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2467{
2468	uint32_t count;
2469	uint8_t *ptr;
2470
2471	ptr = in_ptr;
2472	if ((off < 0) || (len <= 0))
2473		return (NULL);
2474
2475	/* find the desired start location */
2476	while ((m != NULL) && (off > 0)) {
2477		if (off < SCTP_BUF_LEN(m))
2478			break;
2479		off -= SCTP_BUF_LEN(m);
2480		m = SCTP_BUF_NEXT(m);
2481	}
2482	if (m == NULL)
2483		return (NULL);
2484
2485	/* is the current mbuf large enough (eg. contiguous)? */
2486	if ((SCTP_BUF_LEN(m) - off) >= len) {
2487		return (mtod(m, caddr_t)+off);
2488	} else {
2489		/* else, it spans more than one mbuf, so save a temp copy... */
2490		while ((m != NULL) && (len > 0)) {
2491			count = min(SCTP_BUF_LEN(m) - off, len);
2492			bcopy(mtod(m, caddr_t)+off, ptr, count);
2493			len -= count;
2494			ptr += count;
2495			off = 0;
2496			m = SCTP_BUF_NEXT(m);
2497		}
2498		if ((m == NULL) && (len > 0))
2499			return (NULL);
2500		else
2501			return ((caddr_t)in_ptr);
2502	}
2503}
2504
2505
2506
2507struct sctp_paramhdr *
2508sctp_get_next_param(struct mbuf *m,
2509    int offset,
2510    struct sctp_paramhdr *pull,
2511    int pull_limit)
2512{
2513	/* This just provides a typed signature to Peter's Pull routine */
2514	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2515	    (uint8_t *) pull));
2516}
2517
2518
2519int
2520sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2521{
2522	/*
2523	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2524	 * padlen is > 3 this routine will fail.
2525	 */
2526	uint8_t *dp;
2527	int i;
2528
2529	if (padlen > 3) {
2530		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2531		return (ENOBUFS);
2532	}
2533	if (padlen <= M_TRAILINGSPACE(m)) {
2534		/*
2535		 * The easy way. We hope the majority of the time we hit
2536		 * here :)
2537		 */
2538		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2539		SCTP_BUF_LEN(m) += padlen;
2540	} else {
2541		/* Hard way we must grow the mbuf */
2542		struct mbuf *tmp;
2543
2544		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2545		if (tmp == NULL) {
2546			/* Out of space GAK! we are in big trouble. */
2547			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2548			return (ENOBUFS);
2549		}
2550		/* setup and insert in middle */
2551		SCTP_BUF_LEN(tmp) = padlen;
2552		SCTP_BUF_NEXT(tmp) = NULL;
2553		SCTP_BUF_NEXT(m) = tmp;
2554		dp = mtod(tmp, uint8_t *);
2555	}
2556	/* zero out the pad */
2557	for (i = 0; i < padlen; i++) {
2558		*dp = 0;
2559		dp++;
2560	}
2561	return (0);
2562}
2563
2564int
2565sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2566{
2567	/* find the last mbuf in chain and pad it */
2568	struct mbuf *m_at;
2569
2570	if (last_mbuf) {
2571		return (sctp_add_pad_tombuf(last_mbuf, padval));
2572	} else {
2573		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2574			if (SCTP_BUF_NEXT(m_at) == NULL) {
2575				return (sctp_add_pad_tombuf(m_at, padval));
2576			}
2577		}
2578	}
2579	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2580	return (EFAULT);
2581}
2582
/*
 * Deliver an SCTP_ASSOC_CHANGE notification to the socket's read queue
 * (if the user enabled SCTP_PCB_FLAGS_RECVASSOCEVNT).  For COMM_UP and
 * RESTART, the notification carries the list of features the peer
 * supports; for COMM_LOST and CANT_STR_ASSOC it carries the ABORT chunk
 * (if any).  For 1-to-1 style sockets, a terminal state additionally
 * sets so_error and marks the socket unreadable.  Finally, any sleepers
 * on the socket are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Size the trailing sac_info area by notification kind. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value (drop the sac_info data). */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* List the features the peer supports. */
				i = 0;
				if (stcb->asoc.peer_supports_prsctp) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.peer_supports_auth) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.peer_supports_asconf) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.peer_supports_strreset) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Attach the raw ABORT chunk. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold the assoc across the unlock/relock window. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2729
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with
 * the given state (e.g. SCTP_ADDR_AVAILABLE/SCTP_ADDR_UNREACHABLE) and
 * error code, if the user enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			/*
			 * Make link-local addresses presentable to
			 * userland: recover or strip the scope id.
			 */
			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2807
2808
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for
 * a chunk that could not be (fully) delivered.  'sent' selects the
 * SCTP_DATA_SENT vs SCTP_DATA_UNSENT flag.  The chunk's data mbufs are
 * stolen (chk->data is set to NULL) and appended to the notification.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence when enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * NOTE(review): length is adjusted here before the later check
	 * that chk->send_size >= sizeof(struct sctp_data_chunk);
	 * presumably send_size always includes the chunk header at this
	 * point — confirm against callers.
	 */
	length += chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header (it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2919
2920
/*
 * Like sctp_notify_send_failed(), but for a stream-queue-pending entry
 * (a message that never became a chunk).  Always reported as
 * SCTP_DATA_UNSENT; the pending data mbufs are stolen (sp->data set to
 * NULL) and appended to the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence when enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = sp->stream;
		/* some_taken: part of the message was already sent */
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3020
3021
3022
3023static void
3024sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3025{
3026	struct mbuf *m_notify;
3027	struct sctp_adaptation_event *sai;
3028	struct sctp_queued_to_read *control;
3029
3030	if ((stcb == NULL) ||
3031	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3032		/* event not enabled */
3033		return;
3034	}
3035	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3036	if (m_notify == NULL)
3037		/* no space left */
3038		return;
3039	SCTP_BUF_LEN(m_notify) = 0;
3040	sai = mtod(m_notify, struct sctp_adaptation_event *);
3041	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3042	sai->sai_flags = 0;
3043	sai->sai_length = sizeof(struct sctp_adaptation_event);
3044	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3045	sai->sai_assoc_id = sctp_get_associd(stcb);
3046
3047	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3048	SCTP_BUF_NEXT(m_notify) = NULL;
3049
3050	/* append to socket */
3051	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3052	    0, 0, stcb->asoc.context, 0, 0, 0,
3053	    m_notify);
3054	if (control == NULL) {
3055		/* no memory */
3056		sctp_m_freem(m_notify);
3057		return;
3058	}
3059	control->length = SCTP_BUF_LEN(m_notify);
3060	control->spec_flags = M_NOTIFICATION;
3061	/* not that we need this */
3062	control->tail_mbuf = m_notify;
3063	sctp_add_to_readq(stcb->sctp_ep, stcb,
3064	    control,
3065	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3066}
3067
3068/* This always must be called with the read-queue LOCKED in the INP */
3069static void
3070sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3071    uint32_t val, int so_locked
3072#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3073    SCTP_UNUSED
3074#endif
3075)
3076{
3077	struct mbuf *m_notify;
3078	struct sctp_pdapi_event *pdapi;
3079	struct sctp_queued_to_read *control;
3080	struct sockbuf *sb;
3081
3082	if ((stcb == NULL) ||
3083	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3084		/* event not enabled */
3085		return;
3086	}
3087	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3088		return;
3089	}
3090	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3091	if (m_notify == NULL)
3092		/* no space left */
3093		return;
3094	SCTP_BUF_LEN(m_notify) = 0;
3095	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3096	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3097	pdapi->pdapi_flags = 0;
3098	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3099	pdapi->pdapi_indication = error;
3100	pdapi->pdapi_stream = (val >> 16);
3101	pdapi->pdapi_seq = (val & 0x0000ffff);
3102	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3103
3104	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3105	SCTP_BUF_NEXT(m_notify) = NULL;
3106	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3107	    0, 0, stcb->asoc.context, 0, 0, 0,
3108	    m_notify);
3109	if (control == NULL) {
3110		/* no memory */
3111		sctp_m_freem(m_notify);
3112		return;
3113	}
3114	control->spec_flags = M_NOTIFICATION;
3115	control->length = SCTP_BUF_LEN(m_notify);
3116	/* not that we need this */
3117	control->tail_mbuf = m_notify;
3118	control->held_length = 0;
3119	control->length = 0;
3120	sb = &stcb->sctp_socket->so_rcv;
3121	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3122		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3123	}
3124	sctp_sballoc(stcb, sb, m_notify);
3125	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3126		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3127	}
3128	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3129	control->end_added = 1;
3130	if (stcb->asoc.control_pdapi)
3131		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3132	else {
3133		/* we really should not see this case */
3134		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3135	}
3136	if (stcb->sctp_ep && stcb->sctp_socket) {
3137		/* This should always be the case */
3138#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3139		struct socket *so;
3140
3141		so = SCTP_INP_SO(stcb->sctp_ep);
3142		if (!so_locked) {
3143			atomic_add_int(&stcb->asoc.refcnt, 1);
3144			SCTP_TCB_UNLOCK(stcb);
3145			SCTP_SOCKET_LOCK(so, 1);
3146			SCTP_TCB_LOCK(stcb);
3147			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3148			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3149				SCTP_SOCKET_UNLOCK(so, 1);
3150				return;
3151			}
3152		}
3153#endif
3154		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3155#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3156		if (!so_locked) {
3157			SCTP_SOCKET_UNLOCK(so, 1);
3158		}
3159#endif
3160	}
3161}
3162
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification when a SHUTDOWN completes.
 * For one-to-one (TCP model) sockets the socket is additionally marked
 * as unable to send (socantsendmore) before the event is queued; the
 * event itself is only delivered if SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT is
 * subscribed.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Acquire the socket lock in the correct order: release
		 * the TCB lock (keeping a refcount so the TCB can't be
		 * freed), lock the socket, relock the TCB, and bail if
		 * the socket closed in the window.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3230
3231static void
3232sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3233    int so_locked
3234#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3235    SCTP_UNUSED
3236#endif
3237)
3238{
3239	struct mbuf *m_notify;
3240	struct sctp_sender_dry_event *event;
3241	struct sctp_queued_to_read *control;
3242
3243	if ((stcb == NULL) ||
3244	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3245		/* event not enabled */
3246		return;
3247	}
3248	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3249	if (m_notify == NULL) {
3250		/* no space left */
3251		return;
3252	}
3253	SCTP_BUF_LEN(m_notify) = 0;
3254	event = mtod(m_notify, struct sctp_sender_dry_event *);
3255	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3256	event->sender_dry_flags = 0;
3257	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3258	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3259
3260	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3261	SCTP_BUF_NEXT(m_notify) = NULL;
3262
3263	/* append to socket */
3264	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3265	    0, 0, stcb->asoc.context, 0, 0, 0,
3266	    m_notify);
3267	if (control == NULL) {
3268		/* no memory */
3269		sctp_m_freem(m_notify);
3270		return;
3271	}
3272	control->length = SCTP_BUF_LEN(m_notify);
3273	control->spec_flags = M_NOTIFICATION;
3274	/* not that we need this */
3275	control->tail_mbuf = m_notify;
3276	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3277	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3278}
3279
3280
3281void
3282sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3283{
3284	struct mbuf *m_notify;
3285	struct sctp_queued_to_read *control;
3286	struct sctp_stream_change_event *stradd;
3287	int len;
3288
3289	if ((stcb == NULL) ||
3290	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3291		/* event not enabled */
3292		return;
3293	}
3294	if ((stcb->asoc.peer_req_out) && flag) {
3295		/* Peer made the request, don't tell the local user */
3296		stcb->asoc.peer_req_out = 0;
3297		return;
3298	}
3299	stcb->asoc.peer_req_out = 0;
3300	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3301	if (m_notify == NULL)
3302		/* no space left */
3303		return;
3304	SCTP_BUF_LEN(m_notify) = 0;
3305	len = sizeof(struct sctp_stream_change_event);
3306	if (len > M_TRAILINGSPACE(m_notify)) {
3307		/* never enough room */
3308		sctp_m_freem(m_notify);
3309		return;
3310	}
3311	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3312	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3313	stradd->strchange_flags = flag;
3314	stradd->strchange_length = len;
3315	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3316	stradd->strchange_instrms = numberin;
3317	stradd->strchange_outstrms = numberout;
3318	SCTP_BUF_LEN(m_notify) = len;
3319	SCTP_BUF_NEXT(m_notify) = NULL;
3320	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3321		/* no space */
3322		sctp_m_freem(m_notify);
3323		return;
3324	}
3325	/* append to socket */
3326	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3327	    0, 0, stcb->asoc.context, 0, 0, 0,
3328	    m_notify);
3329	if (control == NULL) {
3330		/* no memory */
3331		sctp_m_freem(m_notify);
3332		return;
3333	}
3334	control->spec_flags = M_NOTIFICATION;
3335	control->length = SCTP_BUF_LEN(m_notify);
3336	/* not that we need this */
3337	control->tail_mbuf = m_notify;
3338	sctp_add_to_readq(stcb->sctp_ep, stcb,
3339	    control,
3340	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3341}
3342
3343void
3344sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3345{
3346	struct mbuf *m_notify;
3347	struct sctp_queued_to_read *control;
3348	struct sctp_assoc_reset_event *strasoc;
3349	int len;
3350
3351	if ((stcb == NULL) ||
3352	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3353		/* event not enabled */
3354		return;
3355	}
3356	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3357	if (m_notify == NULL)
3358		/* no space left */
3359		return;
3360	SCTP_BUF_LEN(m_notify) = 0;
3361	len = sizeof(struct sctp_assoc_reset_event);
3362	if (len > M_TRAILINGSPACE(m_notify)) {
3363		/* never enough room */
3364		sctp_m_freem(m_notify);
3365		return;
3366	}
3367	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3368	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3369	strasoc->assocreset_flags = flag;
3370	strasoc->assocreset_length = len;
3371	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3372	strasoc->assocreset_local_tsn = sending_tsn;
3373	strasoc->assocreset_remote_tsn = recv_tsn;
3374	SCTP_BUF_LEN(m_notify) = len;
3375	SCTP_BUF_NEXT(m_notify) = NULL;
3376	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3377		/* no space */
3378		sctp_m_freem(m_notify);
3379		return;
3380	}
3381	/* append to socket */
3382	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3383	    0, 0, stcb->asoc.context, 0, 0, 0,
3384	    m_notify);
3385	if (control == NULL) {
3386		/* no memory */
3387		sctp_m_freem(m_notify);
3388		return;
3389	}
3390	control->spec_flags = M_NOTIFICATION;
3391	control->length = SCTP_BUF_LEN(m_notify);
3392	/* not that we need this */
3393	control->tail_mbuf = m_notify;
3394	sctp_add_to_readq(stcb->sctp_ep, stcb,
3395	    control,
3396	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3397}
3398
3399
3400
3401static void
3402sctp_notify_stream_reset(struct sctp_tcb *stcb,
3403    int number_entries, uint16_t * list, int flag)
3404{
3405	struct mbuf *m_notify;
3406	struct sctp_queued_to_read *control;
3407	struct sctp_stream_reset_event *strreset;
3408	int len;
3409
3410	if ((stcb == NULL) ||
3411	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3412		/* event not enabled */
3413		return;
3414	}
3415	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3416	if (m_notify == NULL)
3417		/* no space left */
3418		return;
3419	SCTP_BUF_LEN(m_notify) = 0;
3420	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3421	if (len > M_TRAILINGSPACE(m_notify)) {
3422		/* never enough room */
3423		sctp_m_freem(m_notify);
3424		return;
3425	}
3426	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3427	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3428	strreset->strreset_flags = flag;
3429	strreset->strreset_length = len;
3430	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3431	if (number_entries) {
3432		int i;
3433
3434		for (i = 0; i < number_entries; i++) {
3435			strreset->strreset_stream_list[i] = ntohs(list[i]);
3436		}
3437	}
3438	SCTP_BUF_LEN(m_notify) = len;
3439	SCTP_BUF_NEXT(m_notify) = NULL;
3440	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3441		/* no space */
3442		sctp_m_freem(m_notify);
3443		return;
3444	}
3445	/* append to socket */
3446	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3447	    0, 0, stcb->asoc.context, 0, 0, 0,
3448	    m_notify);
3449	if (control == NULL) {
3450		/* no memory */
3451		sctp_m_freem(m_notify);
3452		return;
3453	}
3454	control->spec_flags = M_NOTIFICATION;
3455	control->length = SCTP_BUF_LEN(m_notify);
3456	/* not that we need this */
3457	control->tail_mbuf = m_notify;
3458	sctp_add_to_readq(stcb->sctp_ep, stcb,
3459	    control,
3460	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3461}
3462
3463
/*
 * Queue an SCTP_REMOTE_ERROR notification carrying 'error' and, when
 * memory allows, a copy of the offending ERROR chunk appended in
 * sre_data.  If the full-size mbuf allocation fails the function
 * retries with just the fixed-size header, dropping the chunk copy.
 * No-op unless SCTP_PCB_FLAGS_RECVPEERERR is subscribed.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * notif_len still exceeds the header size only if the first,
	 * full-size allocation succeeded, so the chunk copy fits.
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for the read-queue entry */
		sctp_m_freem(m_notify);
	}
}
3519
3520
/*
 * Central dispatcher for notifications to the ULP (user process).
 * Maps a SCTP_NOTIFY_* code plus an opaque 'data' pointer onto the
 * specific sctp_notify_*() helper that builds and queues the matching
 * socket notification.  'data' is interpreted per notification type
 * (a struct sctp_nets *, a chunk pointer, a uint16_t stream list, a
 * packed uint32_t, ...).  Silently returns when the socket is gone,
 * can't receive, or - for interface events - the association is still
 * in a front (COOKIE) state.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is delivered at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
		/* note: "DELVIERY" misspelling is the historical constant name */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* CANT_STR_ASSOC if aborted before the handshake completed */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* for AUTH events 'data' carries the key id in its low bits */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3700
/*
 * Report every queued outbound message as failed to the ULP and free
 * it: drains the sent queue (SCTP_NOTIFY_SENT_DG_FAIL), the pending
 * send queue (SCTP_NOTIFY_UNSENT_DG_FAIL), and each stream's outqueue
 * (SCTP_NOTIFY_SPECIAL_SP_FAIL).  Used when an association is being
 * torn down.  'holds_lock' says whether the caller already holds the
 * TCB send lock; 'error' is passed through to each notification.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already taken off the stream count */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* notification may consume chk->data; recheck before freeing */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* notification may consume sp->data; recheck before freeing */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3812
3813void
3814sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3815    struct sctp_abort_chunk *abort, int so_locked
3816#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3817    SCTP_UNUSED
3818#endif
3819)
3820{
3821	if (stcb == NULL) {
3822		return;
3823	}
3824	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3825	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3826	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3827		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3828	}
3829	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3830	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3831	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3832		return;
3833	}
3834	/* Tell them we lost the asoc */
3835	sctp_report_all_outbound(stcb, error, 1, so_locked);
3836	if (from_peer) {
3837		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3838	} else {
3839		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3840	}
3841}
3842
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (if a TCB exists), send an ABORT to the peer using the peer's
 * vtag, and free the TCB.  'stcb' may be NULL (out-of-the-blue case),
 * in which case only the ABORT is sent with vtag 0.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* take the socket lock in the correct order before freeing */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* only established/shutdown-received assocs count as current */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3891
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN
 * logs.  Each log is a circular buffer: when the 'wrapped' flag is
 * set, entries from the current index to the end are printed first,
 * then the entries from 0 to the current index.
 *
 * NOTE(review): the body is compiled only under NOSIY_PRINTS, which
 * looks like a typo of "NOISY_PRINTS" — as written, the function is
 * effectively empty unless that exact (misspelled) macro is defined.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* older (wrapped-around) half of the inbound log first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same two-pass walk for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3955
/*
 * Locally abort an existing association: notify the ULP (unless the
 * socket is gone), send an ABORT chunk with 'op_err' to the peer,
 * update the abort statistics, and free the TCB.  If 'stcb' is NULL
 * and the socket is gone with no remaining associations, the inp
 * itself is freed instead.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* only established/shutdown-received assocs count as current */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* take the socket lock in the correct order before freeing */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4016
4017void
4018sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4019    struct sockaddr *src, struct sockaddr *dst,
4020    struct sctphdr *sh, struct sctp_inpcb *inp,
4021    uint8_t use_mflowid, uint32_t mflowid,
4022    uint32_t vrf_id, uint16_t port)
4023{
4024	struct sctp_chunkhdr *ch, chunk_buf;
4025	unsigned int chk_length;
4026	int contains_init_chunk;
4027
4028	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4029	/* Generate a TO address for future reference */
4030	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4031		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4032			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4033			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4034		}
4035	}
4036	contains_init_chunk = 0;
4037	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4038	    sizeof(*ch), (uint8_t *) & chunk_buf);
4039	while (ch != NULL) {
4040		chk_length = ntohs(ch->chunk_length);
4041		if (chk_length < sizeof(*ch)) {
4042			/* break to abort land */
4043			break;
4044		}
4045		switch (ch->chunk_type) {
4046		case SCTP_INIT:
4047			contains_init_chunk = 1;
4048			break;
4049		case SCTP_COOKIE_ECHO:
4050			/* We hit here only if the assoc is being freed */
4051			return;
4052		case SCTP_PACKET_DROPPED:
4053			/* we don't respond to pkt-dropped */
4054			return;
4055		case SCTP_ABORT_ASSOCIATION:
4056			/* we don't respond with an ABORT to an ABORT */
4057			return;
4058		case SCTP_SHUTDOWN_COMPLETE:
4059			/*
4060			 * we ignore it since we are not waiting for it and
4061			 * peer is gone
4062			 */
4063			return;
4064		case SCTP_SHUTDOWN_ACK:
4065			sctp_send_shutdown_complete2(src, dst, sh,
4066			    use_mflowid, mflowid,
4067			    vrf_id, port);
4068			return;
4069		default:
4070			break;
4071		}
4072		offset += SCTP_SIZE32(chk_length);
4073		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4074		    sizeof(*ch), (uint8_t *) & chunk_buf);
4075	}
4076	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4077	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4078	    (contains_init_chunk == 0))) {
4079		sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
4080		    use_mflowid, mflowid,
4081		    vrf_id, port);
4082	}
4083}
4084
4085/*
4086 * check the inbound datagram to make sure there is not an abort inside it,
4087 * if there is return 1, else return 0.
4088 */
4089int
4090sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4091{
4092	struct sctp_chunkhdr *ch;
4093	struct sctp_init_chunk *init_chk, chunk_buf;
4094	int offset;
4095	unsigned int chk_length;
4096
4097	offset = iphlen + sizeof(struct sctphdr);
4098	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4099	    (uint8_t *) & chunk_buf);
4100	while (ch != NULL) {
4101		chk_length = ntohs(ch->chunk_length);
4102		if (chk_length < sizeof(*ch)) {
4103			/* packet is probably corrupt */
4104			break;
4105		}
4106		/* we seem to be ok, is it an abort? */
4107		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4108			/* yep, tell them */
4109			return (1);
4110		}
4111		if (ch->chunk_type == SCTP_INITIATION) {
4112			/* need to update the Vtag */
4113			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4114			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4115			if (init_chk != NULL) {
4116				*vtagfill = ntohl(init_chk->init.initiate_tag);
4117			}
4118		}
4119		/* Nope, move to the next chunk */
4120		offset += SCTP_SIZE32(chk_length);
4121		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4122		    sizeof(*ch), (uint8_t *) & chunk_buf);
4123	}
4124	return (0);
4125}
4126
4127/*
4128 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4129 * set (i.e. it's 0) so, create this function to compare link local scopes
4130 */
4131#ifdef INET6
4132uint32_t
4133sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4134{
4135	struct sockaddr_in6 a, b;
4136
4137	/* save copies */
4138	a = *addr1;
4139	b = *addr2;
4140
4141	if (a.sin6_scope_id == 0)
4142		if (sa6_recoverscope(&a)) {
4143			/* can't get scope, so can't match */
4144			return (0);
4145		}
4146	if (b.sin6_scope_id == 0)
4147		if (sa6_recoverscope(&b)) {
4148			/* can't get scope, so can't match */
4149			return (0);
4150		}
4151	if (a.sin6_scope_id != b.sin6_scope_id)
4152		return (0);
4153
4154	return (1);
4155}
4156
4157/*
4158 * returns a sockaddr_in6 with embedded scope recovered and removed
4159 */
4160struct sockaddr_in6 *
4161sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4162{
4163	/* check and strip embedded scope junk */
4164	if (addr->sin6_family == AF_INET6) {
4165		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4166			if (addr->sin6_scope_id == 0) {
4167				*store = *addr;
4168				if (!sa6_recoverscope(store)) {
4169					/* use the recovered scope */
4170					addr = store;
4171				}
4172			} else {
4173				/* else, return the original "to" addr */
4174				in6_clearscope(&addr->sin6_addr);
4175			}
4176		}
4177	}
4178	return (addr);
4179}
4180
4181#endif
4182
4183/*
4184 * are the two addresses the same?  currently a "scopeless" check returns: 1
4185 * if same, 0 if not
4186 */
4187int
4188sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4189{
4190
4191	/* must be valid */
4192	if (sa1 == NULL || sa2 == NULL)
4193		return (0);
4194
4195	/* must be the same family */
4196	if (sa1->sa_family != sa2->sa_family)
4197		return (0);
4198
4199	switch (sa1->sa_family) {
4200#ifdef INET6
4201	case AF_INET6:
4202		{
4203			/* IPv6 addresses */
4204			struct sockaddr_in6 *sin6_1, *sin6_2;
4205
4206			sin6_1 = (struct sockaddr_in6 *)sa1;
4207			sin6_2 = (struct sockaddr_in6 *)sa2;
4208			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4209			    sin6_2));
4210		}
4211#endif
4212#ifdef INET
4213	case AF_INET:
4214		{
4215			/* IPv4 addresses */
4216			struct sockaddr_in *sin_1, *sin_2;
4217
4218			sin_1 = (struct sockaddr_in *)sa1;
4219			sin_2 = (struct sockaddr_in *)sa2;
4220			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4221		}
4222#endif
4223	default:
4224		/* we don't do these... */
4225		return (0);
4226	}
4227}
4228
4229void
4230sctp_print_address(struct sockaddr *sa)
4231{
4232#ifdef INET6
4233	char ip6buf[INET6_ADDRSTRLEN];
4234
4235#endif
4236
4237	switch (sa->sa_family) {
4238#ifdef INET6
4239	case AF_INET6:
4240		{
4241			struct sockaddr_in6 *sin6;
4242
4243			sin6 = (struct sockaddr_in6 *)sa;
4244			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4245			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4246			    ntohs(sin6->sin6_port),
4247			    sin6->sin6_scope_id);
4248			break;
4249		}
4250#endif
4251#ifdef INET
4252	case AF_INET:
4253		{
4254			struct sockaddr_in *sin;
4255			unsigned char *p;
4256
4257			sin = (struct sockaddr_in *)sa;
4258			p = (unsigned char *)&sin->sin_addr;
4259			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4260			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4261			break;
4262		}
4263#endif
4264	default:
4265		SCTP_PRINTF("?\n");
4266		break;
4267	}
4268}
4269
/*
 * Transfer every read-queue entry belonging to stcb from old_inp to
 * new_inp (used during peeloff/accept).  The mbuf bytes are un-charged
 * from old_so->so_rcv and re-charged to new_so->so_rcv.  If sblock()
 * on the old receive buffer fails, the data is deliberately left alone.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Un-charge each mbuf from the old receive buffer. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge each mbuf to the new receive buffer. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4345
/*
 * Append 'control' to the endpoint's read queue and charge its mbuf
 * chain to the socket buffer 'sb' so select()/read() wake up.  Zero-
 * length mbufs are pruned from the chain first.  If the socket can no
 * longer be read from, or the chain collapses to nothing, the control
 * structure is freed instead.  'end' marks the message as complete.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side is gone; drop the data and free the control. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* Only user data, not notifications, counts as a receive. */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* Charge this mbuf to the socket buffer accounting. */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Wake up any reader sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Take the socket lock, holding a TCB
				 * reference while the TCB lock is dropped.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4471
4472
/*
 * Append the mbuf chain 'm' to an existing read-queue entry 'control'
 * (partial-delivery API or reassembly).  Zero-length mbufs are pruned,
 * the bytes are charged to 'sb' when given, and control->length grows
 * by the appended length.  Returns 0 on success, -1 when the control
 * is missing/complete or nothing was left to add.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader side gone; silently accept and drop. */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			/* Charge the mbuf to the socket buffer accounting. */
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Wake up any reader sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Take the socket lock, holding a TCB reference
			 * while the TCB lock is dropped.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4622
4623
4624
4625/*************HOLD THIS COMMENT FOR PATCH FILE OF
4626 *************ALTERNATE ROUTING CODE
4627 */
4628
4629/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4630 *************ALTERNATE ROUTING CODE
4631 */
4632
4633struct mbuf *
4634sctp_generate_invmanparam(int err)
4635{
4636	/* Return a MBUF with a invalid mandatory parameter */
4637	struct mbuf *m;
4638
4639	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
4640	if (m) {
4641		struct sctp_paramhdr *ph;
4642
4643		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4644		ph = mtod(m, struct sctp_paramhdr *);
4645		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4646		ph->param_type = htons(err);
4647	}
4648	return (m);
4649}
4650
4651#ifdef SCTP_MBCNT_LOGGING
4652void
4653sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4654    struct sctp_tmit_chunk *tp1, int chk_cnt)
4655{
4656	if (tp1->data == NULL) {
4657		return;
4658	}
4659	asoc->chunks_on_out_queue -= chk_cnt;
4660	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4661		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4662		    asoc->total_output_queue_size,
4663		    tp1->book_size,
4664		    0,
4665		    tp1->mbcnt);
4666	}
4667	if (asoc->total_output_queue_size >= tp1->book_size) {
4668		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4669	} else {
4670		asoc->total_output_queue_size = 0;
4671	}
4672
4673	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4674	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4675		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4676			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4677		} else {
4678			stcb->sctp_socket->so_snd.sb_cc = 0;
4679
4680		}
4681	}
4682}
4683
4684#endif
4685
4686int
4687sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4688    uint8_t sent, int so_locked
4689#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4690    SCTP_UNUSED
4691#endif
4692)
4693{
4694	struct sctp_stream_out *strq;
4695	struct sctp_tmit_chunk *chk = NULL, *tp2;
4696	struct sctp_stream_queue_pending *sp;
4697	uint16_t stream = 0, seq = 0;
4698	uint8_t foundeom = 0;
4699	int ret_sz = 0;
4700	int notdone;
4701	int do_wakeup_routine = 0;
4702
4703	stream = tp1->rec.data.stream_number;
4704	seq = tp1->rec.data.stream_seq;
4705	do {
4706		ret_sz += tp1->book_size;
4707		if (tp1->data != NULL) {
4708			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4709				sctp_flight_size_decrease(tp1);
4710				sctp_total_flight_decrease(stcb, tp1);
4711			}
4712			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4713			stcb->asoc.peers_rwnd += tp1->send_size;
4714			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4715			if (sent) {
4716				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4717			} else {
4718				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4719			}
4720			if (tp1->data) {
4721				sctp_m_freem(tp1->data);
4722				tp1->data = NULL;
4723			}
4724			do_wakeup_routine = 1;
4725			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4726				stcb->asoc.sent_queue_cnt_removeable--;
4727			}
4728		}
4729		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4730		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4731		    SCTP_DATA_NOT_FRAG) {
4732			/* not frag'ed we ae done   */
4733			notdone = 0;
4734			foundeom = 1;
4735		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4736			/* end of frag, we are done */
4737			notdone = 0;
4738			foundeom = 1;
4739		} else {
4740			/*
4741			 * Its a begin or middle piece, we must mark all of
4742			 * it
4743			 */
4744			notdone = 1;
4745			tp1 = TAILQ_NEXT(tp1, sctp_next);
4746		}
4747	} while (tp1 && notdone);
4748	if (foundeom == 0) {
4749		/*
4750		 * The multi-part message was scattered across the send and
4751		 * sent queue.
4752		 */
4753		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4754			if ((tp1->rec.data.stream_number != stream) ||
4755			    (tp1->rec.data.stream_seq != seq)) {
4756				break;
4757			}
4758			/*
4759			 * save to chk in case we have some on stream out
4760			 * queue. If so and we have an un-transmitted one we
4761			 * don't have to fudge the TSN.
4762			 */
4763			chk = tp1;
4764			ret_sz += tp1->book_size;
4765			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4766			if (sent) {
4767				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4768			} else {
4769				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4770			}
4771			if (tp1->data) {
4772				sctp_m_freem(tp1->data);
4773				tp1->data = NULL;
4774			}
4775			/* No flight involved here book the size to 0 */
4776			tp1->book_size = 0;
4777			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4778				foundeom = 1;
4779			}
4780			do_wakeup_routine = 1;
4781			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4782			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4783			/*
4784			 * on to the sent queue so we can wait for it to be
4785			 * passed by.
4786			 */
4787			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4788			    sctp_next);
4789			stcb->asoc.send_queue_cnt--;
4790			stcb->asoc.sent_queue_cnt++;
4791		}
4792	}
4793	if (foundeom == 0) {
4794		/*
4795		 * Still no eom found. That means there is stuff left on the
4796		 * stream out queue.. yuck.
4797		 */
4798		SCTP_TCB_SEND_LOCK(stcb);
4799		strq = &stcb->asoc.strmout[stream];
4800		sp = TAILQ_FIRST(&strq->outqueue);
4801		if (sp != NULL) {
4802			sp->discard_rest = 1;
4803			/*
4804			 * We may need to put a chunk on the queue that
4805			 * holds the TSN that would have been sent with the
4806			 * LAST bit.
4807			 */
4808			if (chk == NULL) {
4809				/* Yep, we have to */
4810				sctp_alloc_a_chunk(stcb, chk);
4811				if (chk == NULL) {
4812					/*
4813					 * we are hosed. All we can do is
4814					 * nothing.. which will cause an
4815					 * abort if the peer is paying
4816					 * attention.
4817					 */
4818					goto oh_well;
4819				}
4820				memset(chk, 0, sizeof(*chk));
4821				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4822				chk->sent = SCTP_FORWARD_TSN_SKIP;
4823				chk->asoc = &stcb->asoc;
4824				chk->rec.data.stream_seq = strq->next_sequence_send;
4825				chk->rec.data.stream_number = sp->stream;
4826				chk->rec.data.payloadtype = sp->ppid;
4827				chk->rec.data.context = sp->context;
4828				chk->flags = sp->act_flags;
4829				if (sp->net)
4830					chk->whoTo = sp->net;
4831				else
4832					chk->whoTo = stcb->asoc.primary_destination;
4833				atomic_add_int(&chk->whoTo->ref_count, 1);
4834				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4835				stcb->asoc.pr_sctp_cnt++;
4836				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4837				stcb->asoc.sent_queue_cnt++;
4838				stcb->asoc.pr_sctp_cnt++;
4839			} else {
4840				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4841			}
4842			strq->next_sequence_send++;
4843	oh_well:
4844			if (sp->data) {
4845				/*
4846				 * Pull any data to free up the SB and allow
4847				 * sender to "add more" while we will throw
4848				 * away :-)
4849				 */
4850				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4851				ret_sz += sp->length;
4852				do_wakeup_routine = 1;
4853				sp->some_taken = 1;
4854				sctp_m_freem(sp->data);
4855				sp->data = NULL;
4856				sp->tail_mbuf = NULL;
4857				sp->length = 0;
4858			}
4859		}
4860		SCTP_TCB_SEND_UNLOCK(stcb);
4861	}
4862	if (do_wakeup_routine) {
4863#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4864		struct socket *so;
4865
4866		so = SCTP_INP_SO(stcb->sctp_ep);
4867		if (!so_locked) {
4868			atomic_add_int(&stcb->asoc.refcnt, 1);
4869			SCTP_TCB_UNLOCK(stcb);
4870			SCTP_SOCKET_LOCK(so, 1);
4871			SCTP_TCB_LOCK(stcb);
4872			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4873			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4874				/* assoc was freed while we were unlocked */
4875				SCTP_SOCKET_UNLOCK(so, 1);
4876				return (ret_sz);
4877			}
4878		}
4879#endif
4880		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4881#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4882		if (!so_locked) {
4883			SCTP_SOCKET_UNLOCK(so, 1);
4884		}
4885#endif
4886	}
4887	return (ret_sz);
4888}
4889
4890/*
4891 * checks to see if the given address, sa, is one that is currently known by
4892 * the kernel note: can't distinguish the same address on multiple interfaces
4893 * and doesn't handle multiple addresses with different zone/scope id's note:
4894 * ifa_ifwithaddr() compares the entire sockaddr struct
4895 */
4896struct sctp_ifa *
4897sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4898    int holds_lock)
4899{
4900	struct sctp_laddr *laddr;
4901
4902	if (holds_lock == 0) {
4903		SCTP_INP_RLOCK(inp);
4904	}
4905	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4906		if (laddr->ifa == NULL)
4907			continue;
4908		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4909			continue;
4910#ifdef INET
4911		if (addr->sa_family == AF_INET) {
4912			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4913			    laddr->ifa->address.sin.sin_addr.s_addr) {
4914				/* found him. */
4915				if (holds_lock == 0) {
4916					SCTP_INP_RUNLOCK(inp);
4917				}
4918				return (laddr->ifa);
4919				break;
4920			}
4921		}
4922#endif
4923#ifdef INET6
4924		if (addr->sa_family == AF_INET6) {
4925			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4926			    &laddr->ifa->address.sin6)) {
4927				/* found him. */
4928				if (holds_lock == 0) {
4929					SCTP_INP_RUNLOCK(inp);
4930				}
4931				return (laddr->ifa);
4932				break;
4933			}
4934		}
4935#endif
4936	}
4937	if (holds_lock == 0) {
4938		SCTP_INP_RUNLOCK(inp);
4939	}
4940	return (NULL);
4941}
4942
4943uint32_t
4944sctp_get_ifa_hash_val(struct sockaddr *addr)
4945{
4946	switch (addr->sa_family) {
4947#ifdef INET
4948	case AF_INET:
4949		{
4950			struct sockaddr_in *sin;
4951
4952			sin = (struct sockaddr_in *)addr;
4953			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4954		}
4955#endif
4956#ifdef INET6
4957	case AF_INET6:
4958		{
4959			struct sockaddr_in6 *sin6;
4960			uint32_t hash_of_addr;
4961
4962			sin6 = (struct sockaddr_in6 *)addr;
4963			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4964			    sin6->sin6_addr.s6_addr32[1] +
4965			    sin6->sin6_addr.s6_addr32[2] +
4966			    sin6->sin6_addr.s6_addr32[3]);
4967			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4968			return (hash_of_addr);
4969		}
4970#endif
4971	default:
4972		break;
4973	}
4974	return (0);
4975}
4976
4977struct sctp_ifa *
4978sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4979{
4980	struct sctp_ifa *sctp_ifap;
4981	struct sctp_vrf *vrf;
4982	struct sctp_ifalist *hash_head;
4983	uint32_t hash_of_addr;
4984
4985	if (holds_lock == 0)
4986		SCTP_IPI_ADDR_RLOCK();
4987
4988	vrf = sctp_find_vrf(vrf_id);
4989	if (vrf == NULL) {
4990stage_right:
4991		if (holds_lock == 0)
4992			SCTP_IPI_ADDR_RUNLOCK();
4993		return (NULL);
4994	}
4995	hash_of_addr = sctp_get_ifa_hash_val(addr);
4996
4997	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4998	if (hash_head == NULL) {
4999		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5000		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5001		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5002		sctp_print_address(addr);
5003		SCTP_PRINTF("No such bucket for address\n");
5004		if (holds_lock == 0)
5005			SCTP_IPI_ADDR_RUNLOCK();
5006
5007		return (NULL);
5008	}
5009	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5010		if (sctp_ifap == NULL) {
5011#ifdef INVARIANTS
5012			panic("Huh LIST_FOREACH corrupt");
5013			goto stage_right;
5014#else
5015			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5016			goto stage_right;
5017#endif
5018		}
5019		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5020			continue;
5021#ifdef INET
5022		if (addr->sa_family == AF_INET) {
5023			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5024			    sctp_ifap->address.sin.sin_addr.s_addr) {
5025				/* found him. */
5026				if (holds_lock == 0)
5027					SCTP_IPI_ADDR_RUNLOCK();
5028				return (sctp_ifap);
5029				break;
5030			}
5031		}
5032#endif
5033#ifdef INET6
5034		if (addr->sa_family == AF_INET6) {
5035			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5036			    &sctp_ifap->address.sin6)) {
5037				/* found him. */
5038				if (holds_lock == 0)
5039					SCTP_IPI_ADDR_RUNLOCK();
5040				return (sctp_ifap);
5041				break;
5042			}
5043		}
5044#endif
5045	}
5046	if (holds_lock == 0)
5047		SCTP_IPI_ADDR_RUNLOCK();
5048	return (NULL);
5049}
5050
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?
	 *
	 * Called from the receive path after the user consumed
	 * *freed_so_far bytes.  If the receive window computed now has
	 * opened by at least rwnd_req bytes over my_last_reported_rwnd,
	 * a window-update SACK is sent (dropping the caller's read-queue
	 * lock around the send when hold_rlock is set, and re-taking it
	 * before return); otherwise the freed count is just accumulated
	 * on the tcb for next time.
	 */
	int r_unlocked = 0;	/* set when we dropped the caller's read lock */
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well while we work on it. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the tcb and reset it. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing worth reporting. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window opened enough: send a window-update SACK. */
		if (hold_rlock) {
			/* Drop the read lock before sending; re-taken at out. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the caller's read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the assoc reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5132
5133int
5134sctp_sorecvmsg(struct socket *so,
5135    struct uio *uio,
5136    struct mbuf **mp,
5137    struct sockaddr *from,
5138    int fromlen,
5139    int *msg_flags,
5140    struct sctp_sndrcvinfo *sinfo,
5141    int filling_sinfo)
5142{
5143	/*
5144	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5145	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5146	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5147	 * On the way out we may send out any combination of:
5148	 * MSG_NOTIFICATION MSG_EOR
5149	 *
5150	 */
5151	struct sctp_inpcb *inp = NULL;
5152	int my_len = 0;
5153	int cp_len = 0, error = 0;
5154	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5155	struct mbuf *m = NULL;
5156	struct sctp_tcb *stcb = NULL;
5157	int wakeup_read_socket = 0;
5158	int freecnt_applied = 0;
5159	int out_flags = 0, in_flags = 0;
5160	int block_allowed = 1;
5161	uint32_t freed_so_far = 0;
5162	uint32_t copied_so_far = 0;
5163	int in_eeor_mode = 0;
5164	int no_rcv_needed = 0;
5165	uint32_t rwnd_req = 0;
5166	int hold_sblock = 0;
5167	int hold_rlock = 0;
5168	int slen = 0;
5169	uint32_t held_length = 0;
5170	int sockbuf_lock = 0;
5171
5172	if (uio == NULL) {
5173		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5174		return (EINVAL);
5175	}
5176	if (msg_flags) {
5177		in_flags = *msg_flags;
5178		if (in_flags & MSG_PEEK)
5179			SCTP_STAT_INCR(sctps_read_peeks);
5180	} else {
5181		in_flags = 0;
5182	}
5183	slen = uio->uio_resid;
5184
5185	/* Pull in and set up our int flags */
5186	if (in_flags & MSG_OOB) {
5187		/* Out of band's NOT supported */
5188		return (EOPNOTSUPP);
5189	}
5190	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5191		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5192		return (EINVAL);
5193	}
5194	if ((in_flags & (MSG_DONTWAIT
5195	    | MSG_NBIO
5196	    )) ||
5197	    SCTP_SO_IS_NBIO(so)) {
5198		block_allowed = 0;
5199	}
5200	/* setup the endpoint */
5201	inp = (struct sctp_inpcb *)so->so_pcb;
5202	if (inp == NULL) {
5203		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5204		return (EFAULT);
5205	}
5206	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5207	/* Must be at least a MTU's worth */
5208	if (rwnd_req < SCTP_MIN_RWND)
5209		rwnd_req = SCTP_MIN_RWND;
5210	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5211	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5212		sctp_misc_ints(SCTP_SORECV_ENTER,
5213		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5214	}
5215	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5216		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5217		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5218	}
5219	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5220	if (error) {
5221		goto release_unlocked;
5222	}
5223	sockbuf_lock = 1;
5224restart:
5225
5226
5227restart_nosblocks:
5228	if (hold_sblock == 0) {
5229		SOCKBUF_LOCK(&so->so_rcv);
5230		hold_sblock = 1;
5231	}
5232	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5233	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5234		goto out;
5235	}
5236	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5237		if (so->so_error) {
5238			error = so->so_error;
5239			if ((in_flags & MSG_PEEK) == 0)
5240				so->so_error = 0;
5241			goto out;
5242		} else {
5243			if (so->so_rcv.sb_cc == 0) {
5244				/* indicate EOF */
5245				error = 0;
5246				goto out;
5247			}
5248		}
5249	}
5250	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5251		/* we need to wait for data */
5252		if ((so->so_rcv.sb_cc == 0) &&
5253		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5254		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5255			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5256				/*
5257				 * For active open side clear flags for
5258				 * re-use passive open is blocked by
5259				 * connect.
5260				 */
5261				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5262					/*
5263					 * You were aborted, passive side
5264					 * always hits here
5265					 */
5266					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5267					error = ECONNRESET;
5268				}
5269				so->so_state &= ~(SS_ISCONNECTING |
5270				    SS_ISDISCONNECTING |
5271				    SS_ISCONFIRMING |
5272				    SS_ISCONNECTED);
5273				if (error == 0) {
5274					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5275						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5276						error = ENOTCONN;
5277					}
5278				}
5279				goto out;
5280			}
5281		}
5282		error = sbwait(&so->so_rcv);
5283		if (error) {
5284			goto out;
5285		}
5286		held_length = 0;
5287		goto restart_nosblocks;
5288	} else if (so->so_rcv.sb_cc == 0) {
5289		if (so->so_error) {
5290			error = so->so_error;
5291			if ((in_flags & MSG_PEEK) == 0)
5292				so->so_error = 0;
5293		} else {
5294			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5295			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5296				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5297					/*
5298					 * For active open side clear flags
5299					 * for re-use passive open is
5300					 * blocked by connect.
5301					 */
5302					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5303						/*
5304						 * You were aborted, passive
5305						 * side always hits here
5306						 */
5307						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5308						error = ECONNRESET;
5309					}
5310					so->so_state &= ~(SS_ISCONNECTING |
5311					    SS_ISDISCONNECTING |
5312					    SS_ISCONFIRMING |
5313					    SS_ISCONNECTED);
5314					if (error == 0) {
5315						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5316							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5317							error = ENOTCONN;
5318						}
5319					}
5320					goto out;
5321				}
5322			}
5323			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5324			error = EWOULDBLOCK;
5325		}
5326		goto out;
5327	}
5328	if (hold_sblock == 1) {
5329		SOCKBUF_UNLOCK(&so->so_rcv);
5330		hold_sblock = 0;
5331	}
5332	/* we possibly have data we can read */
5333	/* sa_ignore FREED_MEMORY */
5334	control = TAILQ_FIRST(&inp->read_queue);
5335	if (control == NULL) {
5336		/*
5337		 * This could be happening since the appender did the
5338		 * increment but as not yet did the tailq insert onto the
5339		 * read_queue
5340		 */
5341		if (hold_rlock == 0) {
5342			SCTP_INP_READ_LOCK(inp);
5343		}
5344		control = TAILQ_FIRST(&inp->read_queue);
5345		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5346#ifdef INVARIANTS
5347			panic("Huh, its non zero and nothing on control?");
5348#endif
5349			so->so_rcv.sb_cc = 0;
5350		}
5351		SCTP_INP_READ_UNLOCK(inp);
5352		hold_rlock = 0;
5353		goto restart;
5354	}
5355	if ((control->length == 0) &&
5356	    (control->do_not_ref_stcb)) {
5357		/*
5358		 * Clean up code for freeing assoc that left behind a
5359		 * pdapi.. maybe a peer in EEOR that just closed after
5360		 * sending and never indicated a EOR.
5361		 */
5362		if (hold_rlock == 0) {
5363			hold_rlock = 1;
5364			SCTP_INP_READ_LOCK(inp);
5365		}
5366		control->held_length = 0;
5367		if (control->data) {
5368			/* Hmm there is data here .. fix */
5369			struct mbuf *m_tmp;
5370			int cnt = 0;
5371
5372			m_tmp = control->data;
5373			while (m_tmp) {
5374				cnt += SCTP_BUF_LEN(m_tmp);
5375				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5376					control->tail_mbuf = m_tmp;
5377					control->end_added = 1;
5378				}
5379				m_tmp = SCTP_BUF_NEXT(m_tmp);
5380			}
5381			control->length = cnt;
5382		} else {
5383			/* remove it */
5384			TAILQ_REMOVE(&inp->read_queue, control, next);
5385			/* Add back any hiddend data */
5386			sctp_free_remote_addr(control->whoFrom);
5387			sctp_free_a_readq(stcb, control);
5388		}
5389		if (hold_rlock) {
5390			hold_rlock = 0;
5391			SCTP_INP_READ_UNLOCK(inp);
5392		}
5393		goto restart;
5394	}
5395	if ((control->length == 0) &&
5396	    (control->end_added == 1)) {
5397		/*
5398		 * Do we also need to check for (control->pdapi_aborted ==
5399		 * 1)?
5400		 */
5401		if (hold_rlock == 0) {
5402			hold_rlock = 1;
5403			SCTP_INP_READ_LOCK(inp);
5404		}
5405		TAILQ_REMOVE(&inp->read_queue, control, next);
5406		if (control->data) {
5407#ifdef INVARIANTS
5408			panic("control->data not null but control->length == 0");
5409#else
5410			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5411			sctp_m_freem(control->data);
5412			control->data = NULL;
5413#endif
5414		}
5415		if (control->aux_data) {
5416			sctp_m_free(control->aux_data);
5417			control->aux_data = NULL;
5418		}
5419		sctp_free_remote_addr(control->whoFrom);
5420		sctp_free_a_readq(stcb, control);
5421		if (hold_rlock) {
5422			hold_rlock = 0;
5423			SCTP_INP_READ_UNLOCK(inp);
5424		}
5425		goto restart;
5426	}
5427	if (control->length == 0) {
5428		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5429		    (filling_sinfo)) {
5430			/* find a more suitable one then this */
5431			ctl = TAILQ_NEXT(control, next);
5432			while (ctl) {
5433				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5434				    (ctl->some_taken ||
5435				    (ctl->spec_flags & M_NOTIFICATION) ||
5436				    ((ctl->do_not_ref_stcb == 0) &&
5437				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5438				    ) {
5439					/*-
5440					 * If we have a different TCB next, and there is data
5441					 * present. If we have already taken some (pdapi), OR we can
5442					 * ref the tcb and no delivery as started on this stream, we
5443					 * take it. Note we allow a notification on a different
5444					 * assoc to be delivered..
5445					 */
5446					control = ctl;
5447					goto found_one;
5448				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5449					    (ctl->length) &&
5450					    ((ctl->some_taken) ||
5451					    ((ctl->do_not_ref_stcb == 0) &&
5452					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5453				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5454					/*-
5455					 * If we have the same tcb, and there is data present, and we
5456					 * have the strm interleave feature present. Then if we have
5457					 * taken some (pdapi) or we can refer to tht tcb AND we have
5458					 * not started a delivery for this stream, we can take it.
5459					 * Note we do NOT allow a notificaiton on the same assoc to
5460					 * be delivered.
5461					 */
5462					control = ctl;
5463					goto found_one;
5464				}
5465				ctl = TAILQ_NEXT(ctl, next);
5466			}
5467		}
5468		/*
5469		 * if we reach here, not suitable replacement is available
5470		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5471		 * into the our held count, and its time to sleep again.
5472		 */
5473		held_length = so->so_rcv.sb_cc;
5474		control->held_length = so->so_rcv.sb_cc;
5475		goto restart;
5476	}
5477	/* Clear the held length since there is something to read */
5478	control->held_length = 0;
5479	if (hold_rlock) {
5480		SCTP_INP_READ_UNLOCK(inp);
5481		hold_rlock = 0;
5482	}
5483found_one:
5484	/*
5485	 * If we reach here, control has a some data for us to read off.
5486	 * Note that stcb COULD be NULL.
5487	 */
5488	control->some_taken++;
5489	if (hold_sblock) {
5490		SOCKBUF_UNLOCK(&so->so_rcv);
5491		hold_sblock = 0;
5492	}
5493	stcb = control->stcb;
5494	if (stcb) {
5495		if ((control->do_not_ref_stcb == 0) &&
5496		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5497			if (freecnt_applied == 0)
5498				stcb = NULL;
5499		} else if (control->do_not_ref_stcb == 0) {
5500			/* you can't free it on me please */
5501			/*
5502			 * The lock on the socket buffer protects us so the
5503			 * free code will stop. But since we used the
5504			 * socketbuf lock and the sender uses the tcb_lock
5505			 * to increment, we need to use the atomic add to
5506			 * the refcnt
5507			 */
5508			if (freecnt_applied) {
5509#ifdef INVARIANTS
5510				panic("refcnt already incremented");
5511#else
5512				SCTP_PRINTF("refcnt already incremented?\n");
5513#endif
5514			} else {
5515				atomic_add_int(&stcb->asoc.refcnt, 1);
5516				freecnt_applied = 1;
5517			}
5518			/*
5519			 * Setup to remember how much we have not yet told
5520			 * the peer our rwnd has opened up. Note we grab the
5521			 * value from the tcb from last time. Note too that
5522			 * sack sending clears this when a sack is sent,
5523			 * which is fine. Once we hit the rwnd_req, we then
5524			 * will go to the sctp_user_rcvd() that will not
5525			 * lock until it KNOWs it MUST send a WUP-SACK.
5526			 */
5527			freed_so_far = stcb->freed_by_sorcv_sincelast;
5528			stcb->freed_by_sorcv_sincelast = 0;
5529		}
5530	}
5531	if (stcb &&
5532	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5533	    control->do_not_ref_stcb == 0) {
5534		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5535	}
5536	/* First lets get off the sinfo and sockaddr info */
5537	if ((sinfo) && filling_sinfo) {
5538		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5539		nxt = TAILQ_NEXT(control, next);
5540		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5541		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5542			struct sctp_extrcvinfo *s_extra;
5543
5544			s_extra = (struct sctp_extrcvinfo *)sinfo;
5545			if ((nxt) &&
5546			    (nxt->length)) {
5547				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5548				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5549					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5550				}
5551				if (nxt->spec_flags & M_NOTIFICATION) {
5552					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5553				}
5554				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5555				s_extra->sreinfo_next_length = nxt->length;
5556				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5557				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5558				if (nxt->tail_mbuf != NULL) {
5559					if (nxt->end_added) {
5560						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5561					}
5562				}
5563			} else {
5564				/*
5565				 * we explicitly 0 this, since the memcpy
5566				 * got some other things beyond the older
5567				 * sinfo_ that is on the control's structure
5568				 * :-D
5569				 */
5570				nxt = NULL;
5571				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5572				s_extra->sreinfo_next_aid = 0;
5573				s_extra->sreinfo_next_length = 0;
5574				s_extra->sreinfo_next_ppid = 0;
5575				s_extra->sreinfo_next_stream = 0;
5576			}
5577		}
5578		/*
5579		 * update off the real current cum-ack, if we have an stcb.
5580		 */
5581		if ((control->do_not_ref_stcb == 0) && stcb)
5582			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5583		/*
5584		 * mask off the high bits, we keep the actual chunk bits in
5585		 * there.
5586		 */
5587		sinfo->sinfo_flags &= 0x00ff;
5588		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5589			sinfo->sinfo_flags |= SCTP_UNORDERED;
5590		}
5591	}
5592#ifdef SCTP_ASOCLOG_OF_TSNS
5593	{
5594		int index, newindex;
5595		struct sctp_pcbtsn_rlog *entry;
5596
5597		do {
5598			index = inp->readlog_index;
5599			newindex = index + 1;
5600			if (newindex >= SCTP_READ_LOG_SIZE) {
5601				newindex = 0;
5602			}
5603		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5604		entry = &inp->readlog[index];
5605		entry->vtag = control->sinfo_assoc_id;
5606		entry->strm = control->sinfo_stream;
5607		entry->seq = control->sinfo_ssn;
5608		entry->sz = control->length;
5609		entry->flgs = control->sinfo_flags;
5610	}
5611#endif
5612	if (fromlen && from) {
5613		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5614		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5615#ifdef INET6
5616		case AF_INET6:
5617			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5618			break;
5619#endif
5620#ifdef INET
5621		case AF_INET:
5622			((struct sockaddr_in *)from)->sin_port = control->port_from;
5623			break;
5624#endif
5625		default:
5626			break;
5627		}
5628		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5629
5630#if defined(INET) && defined(INET6)
5631		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5632		    (from->sa_family == AF_INET) &&
5633		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5634			struct sockaddr_in *sin;
5635			struct sockaddr_in6 sin6;
5636
5637			sin = (struct sockaddr_in *)from;
5638			bzero(&sin6, sizeof(sin6));
5639			sin6.sin6_family = AF_INET6;
5640			sin6.sin6_len = sizeof(struct sockaddr_in6);
5641			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5642			bcopy(&sin->sin_addr,
5643			    &sin6.sin6_addr.s6_addr32[3],
5644			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5645			sin6.sin6_port = sin->sin_port;
5646			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5647		}
5648#endif
5649#ifdef INET6
5650		{
5651			struct sockaddr_in6 lsa6, *from6;
5652
5653			from6 = (struct sockaddr_in6 *)from;
5654			sctp_recover_scope_mac(from6, (&lsa6));
5655		}
5656#endif
5657	}
5658	/* now copy out what data we can */
5659	if (mp == NULL) {
5660		/* copy out each mbuf in the chain up to length */
5661get_more_data:
5662		m = control->data;
5663		while (m) {
5664			/* Move out all we can */
5665			cp_len = (int)uio->uio_resid;
5666			my_len = (int)SCTP_BUF_LEN(m);
5667			if (cp_len > my_len) {
5668				/* not enough in this buf */
5669				cp_len = my_len;
5670			}
5671			if (hold_rlock) {
5672				SCTP_INP_READ_UNLOCK(inp);
5673				hold_rlock = 0;
5674			}
5675			if (cp_len > 0)
5676				error = uiomove(mtod(m, char *), cp_len, uio);
5677			/* re-read */
5678			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5679				goto release;
5680			}
5681			if ((control->do_not_ref_stcb == 0) && stcb &&
5682			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5683				no_rcv_needed = 1;
5684			}
5685			if (error) {
5686				/* error we are out of here */
5687				goto release;
5688			}
5689			if ((SCTP_BUF_NEXT(m) == NULL) &&
5690			    (cp_len >= SCTP_BUF_LEN(m)) &&
5691			    ((control->end_added == 0) ||
5692			    (control->end_added &&
5693			    (TAILQ_NEXT(control, next) == NULL)))
5694			    ) {
5695				SCTP_INP_READ_LOCK(inp);
5696				hold_rlock = 1;
5697			}
5698			if (cp_len == SCTP_BUF_LEN(m)) {
5699				if ((SCTP_BUF_NEXT(m) == NULL) &&
5700				    (control->end_added)) {
5701					out_flags |= MSG_EOR;
5702					if ((control->do_not_ref_stcb == 0) &&
5703					    (control->stcb != NULL) &&
5704					    ((control->spec_flags & M_NOTIFICATION) == 0))
5705						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5706				}
5707				if (control->spec_flags & M_NOTIFICATION) {
5708					out_flags |= MSG_NOTIFICATION;
5709				}
5710				/* we ate up the mbuf */
5711				if (in_flags & MSG_PEEK) {
5712					/* just looking */
5713					m = SCTP_BUF_NEXT(m);
5714					copied_so_far += cp_len;
5715				} else {
5716					/* dispose of the mbuf */
5717					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5718						sctp_sblog(&so->so_rcv,
5719						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5720					}
5721					sctp_sbfree(control, stcb, &so->so_rcv, m);
5722					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5723						sctp_sblog(&so->so_rcv,
5724						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5725					}
5726					copied_so_far += cp_len;
5727					freed_so_far += cp_len;
5728					freed_so_far += MSIZE;
5729					atomic_subtract_int(&control->length, cp_len);
5730					control->data = sctp_m_free(m);
5731					m = control->data;
5732					/*
5733					 * been through it all, must hold sb
5734					 * lock ok to null tail
5735					 */
5736					if (control->data == NULL) {
5737#ifdef INVARIANTS
5738						if ((control->end_added == 0) ||
5739						    (TAILQ_NEXT(control, next) == NULL)) {
5740							/*
5741							 * If the end is not
5742							 * added, OR the
5743							 * next is NOT null
5744							 * we MUST have the
5745							 * lock.
5746							 */
5747							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5748								panic("Hmm we don't own the lock?");
5749							}
5750						}
5751#endif
5752						control->tail_mbuf = NULL;
5753#ifdef INVARIANTS
5754						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5755							panic("end_added, nothing left and no MSG_EOR");
5756						}
5757#endif
5758					}
5759				}
5760			} else {
5761				/* Do we need to trim the mbuf? */
5762				if (control->spec_flags & M_NOTIFICATION) {
5763					out_flags |= MSG_NOTIFICATION;
5764				}
5765				if ((in_flags & MSG_PEEK) == 0) {
5766					SCTP_BUF_RESV_UF(m, cp_len);
5767					SCTP_BUF_LEN(m) -= cp_len;
5768					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5769						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5770					}
5771					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5772					if ((control->do_not_ref_stcb == 0) &&
5773					    stcb) {
5774						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5775					}
5776					copied_so_far += cp_len;
5777					freed_so_far += cp_len;
5778					freed_so_far += MSIZE;
5779					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5780						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5781						    SCTP_LOG_SBRESULT, 0);
5782					}
5783					atomic_subtract_int(&control->length, cp_len);
5784				} else {
5785					copied_so_far += cp_len;
5786				}
5787			}
5788			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5789				break;
5790			}
5791			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5792			    (control->do_not_ref_stcb == 0) &&
5793			    (freed_so_far >= rwnd_req)) {
5794				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5795			}
5796		}		/* end while(m) */
5797		/*
5798		 * At this point we have looked at it all and we either have
5799		 * a MSG_EOR/or read all the user wants... <OR>
5800		 * control->length == 0.
5801		 */
5802		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5803			/* we are done with this control */
5804			if (control->length == 0) {
5805				if (control->data) {
5806#ifdef INVARIANTS
5807					panic("control->data not null at read eor?");
5808#else
5809					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5810					sctp_m_freem(control->data);
5811					control->data = NULL;
5812#endif
5813				}
5814		done_with_control:
5815				if (TAILQ_NEXT(control, next) == NULL) {
5816					/*
5817					 * If we don't have a next we need a
5818					 * lock, if there is a next
5819					 * interrupt is filling ahead of us
5820					 * and we don't need a lock to
5821					 * remove this guy (which is the
5822					 * head of the queue).
5823					 */
5824					if (hold_rlock == 0) {
5825						SCTP_INP_READ_LOCK(inp);
5826						hold_rlock = 1;
5827					}
5828				}
5829				TAILQ_REMOVE(&inp->read_queue, control, next);
5830				/* Add back any hiddend data */
5831				if (control->held_length) {
5832					held_length = 0;
5833					control->held_length = 0;
5834					wakeup_read_socket = 1;
5835				}
5836				if (control->aux_data) {
5837					sctp_m_free(control->aux_data);
5838					control->aux_data = NULL;
5839				}
5840				no_rcv_needed = control->do_not_ref_stcb;
5841				sctp_free_remote_addr(control->whoFrom);
5842				control->data = NULL;
5843				sctp_free_a_readq(stcb, control);
5844				control = NULL;
5845				if ((freed_so_far >= rwnd_req) &&
5846				    (no_rcv_needed == 0))
5847					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5848
5849			} else {
5850				/*
5851				 * The user did not read all of this
5852				 * message, turn off the returned MSG_EOR
5853				 * since we are leaving more behind on the
5854				 * control to read.
5855				 */
5856#ifdef INVARIANTS
5857				if (control->end_added &&
5858				    (control->data == NULL) &&
5859				    (control->tail_mbuf == NULL)) {
5860					panic("Gak, control->length is corrupt?");
5861				}
5862#endif
5863				no_rcv_needed = control->do_not_ref_stcb;
5864				out_flags &= ~MSG_EOR;
5865			}
5866		}
5867		if (out_flags & MSG_EOR) {
5868			goto release;
5869		}
5870		if ((uio->uio_resid == 0) ||
5871		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5872		    ) {
5873			goto release;
5874		}
5875		/*
5876		 * If I hit here the receiver wants more and this message is
5877		 * NOT done (pd-api). So two questions. Can we block? if not
5878		 * we are done. Did the user NOT set MSG_WAITALL?
5879		 */
5880		if (block_allowed == 0) {
5881			goto release;
5882		}
5883		/*
5884		 * We need to wait for more data a few things: - We don't
5885		 * sbunlock() so we don't get someone else reading. - We
5886		 * must be sure to account for the case where what is added
5887		 * is NOT to our control when we wakeup.
5888		 */
5889
5890		/*
5891		 * Do we need to tell the transport a rwnd update might be
5892		 * needed before we go to sleep?
5893		 */
5894		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5895		    ((freed_so_far >= rwnd_req) &&
5896		    (control->do_not_ref_stcb == 0) &&
5897		    (no_rcv_needed == 0))) {
5898			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5899		}
5900wait_some_more:
5901		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5902			goto release;
5903		}
5904		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5905			goto release;
5906
5907		if (hold_rlock == 1) {
5908			SCTP_INP_READ_UNLOCK(inp);
5909			hold_rlock = 0;
5910		}
5911		if (hold_sblock == 0) {
5912			SOCKBUF_LOCK(&so->so_rcv);
5913			hold_sblock = 1;
5914		}
5915		if ((copied_so_far) && (control->length == 0) &&
5916		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5917			goto release;
5918		}
5919		if (so->so_rcv.sb_cc <= control->held_length) {
5920			error = sbwait(&so->so_rcv);
5921			if (error) {
5922				goto release;
5923			}
5924			control->held_length = 0;
5925		}
5926		if (hold_sblock) {
5927			SOCKBUF_UNLOCK(&so->so_rcv);
5928			hold_sblock = 0;
5929		}
5930		if (control->length == 0) {
5931			/* still nothing here */
5932			if (control->end_added == 1) {
5933				/* he aborted, or is done i.e.did a shutdown */
5934				out_flags |= MSG_EOR;
5935				if (control->pdapi_aborted) {
5936					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5937						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5938
5939					out_flags |= MSG_TRUNC;
5940				} else {
5941					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5942						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5943				}
5944				goto done_with_control;
5945			}
5946			if (so->so_rcv.sb_cc > held_length) {
5947				control->held_length = so->so_rcv.sb_cc;
5948				held_length = 0;
5949			}
5950			goto wait_some_more;
5951		} else if (control->data == NULL) {
5952			/*
5953			 * we must re-sync since data is probably being
5954			 * added
5955			 */
5956			SCTP_INP_READ_LOCK(inp);
5957			if ((control->length > 0) && (control->data == NULL)) {
5958				/*
5959				 * big trouble.. we have the lock and its
5960				 * corrupt?
5961				 */
5962#ifdef INVARIANTS
5963				panic("Impossible data==NULL length !=0");
5964#endif
5965				out_flags |= MSG_EOR;
5966				out_flags |= MSG_TRUNC;
5967				control->length = 0;
5968				SCTP_INP_READ_UNLOCK(inp);
5969				goto done_with_control;
5970			}
5971			SCTP_INP_READ_UNLOCK(inp);
5972			/* We will fall around to get more data */
5973		}
5974		goto get_more_data;
5975	} else {
5976		/*-
5977		 * Give caller back the mbuf chain,
5978		 * store in uio_resid the length
5979		 */
5980		wakeup_read_socket = 0;
5981		if ((control->end_added == 0) ||
5982		    (TAILQ_NEXT(control, next) == NULL)) {
5983			/* Need to get rlock */
5984			if (hold_rlock == 0) {
5985				SCTP_INP_READ_LOCK(inp);
5986				hold_rlock = 1;
5987			}
5988		}
5989		if (control->end_added) {
5990			out_flags |= MSG_EOR;
5991			if ((control->do_not_ref_stcb == 0) &&
5992			    (control->stcb != NULL) &&
5993			    ((control->spec_flags & M_NOTIFICATION) == 0))
5994				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5995		}
5996		if (control->spec_flags & M_NOTIFICATION) {
5997			out_flags |= MSG_NOTIFICATION;
5998		}
5999		uio->uio_resid = control->length;
6000		*mp = control->data;
6001		m = control->data;
6002		while (m) {
6003			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6004				sctp_sblog(&so->so_rcv,
6005				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6006			}
6007			sctp_sbfree(control, stcb, &so->so_rcv, m);
6008			freed_so_far += SCTP_BUF_LEN(m);
6009			freed_so_far += MSIZE;
6010			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6011				sctp_sblog(&so->so_rcv,
6012				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6013			}
6014			m = SCTP_BUF_NEXT(m);
6015		}
6016		control->data = control->tail_mbuf = NULL;
6017		control->length = 0;
6018		if (out_flags & MSG_EOR) {
6019			/* Done with this control */
6020			goto done_with_control;
6021		}
6022	}
6023release:
6024	if (hold_rlock == 1) {
6025		SCTP_INP_READ_UNLOCK(inp);
6026		hold_rlock = 0;
6027	}
6028	if (hold_sblock == 1) {
6029		SOCKBUF_UNLOCK(&so->so_rcv);
6030		hold_sblock = 0;
6031	}
6032	sbunlock(&so->so_rcv);
6033	sockbuf_lock = 0;
6034
6035release_unlocked:
6036	if (hold_sblock) {
6037		SOCKBUF_UNLOCK(&so->so_rcv);
6038		hold_sblock = 0;
6039	}
6040	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6041		if ((freed_so_far >= rwnd_req) &&
6042		    (control && (control->do_not_ref_stcb == 0)) &&
6043		    (no_rcv_needed == 0))
6044			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6045	}
6046out:
6047	if (msg_flags) {
6048		*msg_flags = out_flags;
6049	}
6050	if (((out_flags & MSG_EOR) == 0) &&
6051	    ((in_flags & MSG_PEEK) == 0) &&
6052	    (sinfo) &&
6053	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6054	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6055		struct sctp_extrcvinfo *s_extra;
6056
6057		s_extra = (struct sctp_extrcvinfo *)sinfo;
6058		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6059	}
6060	if (hold_rlock == 1) {
6061		SCTP_INP_READ_UNLOCK(inp);
6062	}
6063	if (hold_sblock) {
6064		SOCKBUF_UNLOCK(&so->so_rcv);
6065	}
6066	if (sockbuf_lock) {
6067		sbunlock(&so->so_rcv);
6068	}
6069	if (freecnt_applied) {
6070		/*
6071		 * The lock on the socket buffer protects us so the free
6072		 * code will stop. But since we used the socketbuf lock and
6073		 * the sender uses the tcb_lock to increment, we need to use
6074		 * the atomic add to the refcnt.
6075		 */
6076		if (stcb == NULL) {
6077#ifdef INVARIANTS
6078			panic("stcb for refcnt has gone NULL?");
6079			goto stage_left;
6080#else
6081			goto stage_left;
6082#endif
6083		}
6084		atomic_add_int(&stcb->asoc.refcnt, -1);
6085		/* Save the value back for next time */
6086		stcb->freed_by_sorcv_sincelast = freed_so_far;
6087	}
6088	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6089		if (stcb) {
6090			sctp_misc_ints(SCTP_SORECV_DONE,
6091			    freed_so_far,
6092			    ((uio) ? (slen - uio->uio_resid) : slen),
6093			    stcb->asoc.my_rwnd,
6094			    so->so_rcv.sb_cc);
6095		} else {
6096			sctp_misc_ints(SCTP_SORECV_DONE,
6097			    freed_so_far,
6098			    ((uio) ? (slen - uio->uio_resid) : slen),
6099			    0,
6100			    so->so_rcv.sb_cc);
6101		}
6102	}
6103stage_left:
6104	if (wakeup_read_socket) {
6105		sctp_sorwakeup(inp, so);
6106	}
6107	return (error);
6108}
6109
6110
6111#ifdef SCTP_MBUF_LOGGING
6112struct mbuf *
6113sctp_m_free(struct mbuf *m)
6114{
6115	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6116		if (SCTP_BUF_IS_EXTENDED(m)) {
6117			sctp_log_mb(m, SCTP_MBUF_IFREE);
6118		}
6119	}
6120	return (m_free(m));
6121}
6122
6123void
6124sctp_m_freem(struct mbuf *mb)
6125{
6126	while (mb != NULL)
6127		mb = sctp_m_free(mb);
6128}
6129
6130#endif
6131
6132int
6133sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6134{
6135	/*
6136	 * Given a local address. For all associations that holds the
6137	 * address, request a peer-set-primary.
6138	 */
6139	struct sctp_ifa *ifa;
6140	struct sctp_laddr *wi;
6141
6142	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6143	if (ifa == NULL) {
6144		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6145		return (EADDRNOTAVAIL);
6146	}
6147	/*
6148	 * Now that we have the ifa we must awaken the iterator with this
6149	 * message.
6150	 */
6151	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6152	if (wi == NULL) {
6153		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6154		return (ENOMEM);
6155	}
6156	/* Now incr the count and int wi structure */
6157	SCTP_INCR_LADDR_COUNT();
6158	bzero(wi, sizeof(*wi));
6159	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6160	wi->ifa = ifa;
6161	wi->action = SCTP_SET_PRIM_ADDR;
6162	atomic_add_int(&ifa->refcount, 1);
6163
6164	/* Now add it to the work queue */
6165	SCTP_WQ_ADDR_LOCK();
6166	/*
6167	 * Should this really be a tailq? As it is we will process the
6168	 * newest first :-0
6169	 */
6170	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6171	SCTP_WQ_ADDR_UNLOCK();
6172	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6173	    (struct sctp_inpcb *)NULL,
6174	    (struct sctp_tcb *)NULL,
6175	    (struct sctp_nets *)NULL);
6176	return (0);
6177}
6178
6179
6180int
6181sctp_soreceive(struct socket *so,
6182    struct sockaddr **psa,
6183    struct uio *uio,
6184    struct mbuf **mp0,
6185    struct mbuf **controlp,
6186    int *flagsp)
6187{
6188	int error, fromlen;
6189	uint8_t sockbuf[256];
6190	struct sockaddr *from;
6191	struct sctp_extrcvinfo sinfo;
6192	int filling_sinfo = 1;
6193	struct sctp_inpcb *inp;
6194
6195	inp = (struct sctp_inpcb *)so->so_pcb;
6196	/* pickup the assoc we are reading from */
6197	if (inp == NULL) {
6198		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6199		return (EINVAL);
6200	}
6201	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6202	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6203	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6204	    (controlp == NULL)) {
6205		/* user does not want the sndrcv ctl */
6206		filling_sinfo = 0;
6207	}
6208	if (psa) {
6209		from = (struct sockaddr *)sockbuf;
6210		fromlen = sizeof(sockbuf);
6211		from->sa_len = 0;
6212	} else {
6213		from = NULL;
6214		fromlen = 0;
6215	}
6216
6217	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6218	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6219	if ((controlp) && (filling_sinfo)) {
6220		/* copy back the sinfo in a CMSG format */
6221		if (filling_sinfo)
6222			*controlp = sctp_build_ctl_nchunk(inp,
6223			    (struct sctp_sndrcvinfo *)&sinfo);
6224		else
6225			*controlp = NULL;
6226	}
6227	if (psa) {
6228		/* copy back the address info */
6229		if (from && from->sa_len) {
6230			*psa = sodupsockaddr(from, M_NOWAIT);
6231		} else {
6232			*psa = NULL;
6233		}
6234	}
6235	return (error);
6236}
6237
6238
6239
6240
6241
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Walk the packed array of 'totaddr' sockaddrs at 'addr' and add
	 * each one as a confirmed remote address of 'stcb'.  Returns the
	 * number of addresses added.  On a bad address or allocation
	 * failure the association is freed, *error is set to EINVAL or
	 * ENOBUFS, and the walk stops; 'stcb' must not be used afterwards.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves 'incr'
			 * unchanged (0 on the first entry), so 'sa' may not
			 * advance and the same bytes are re-examined on the
			 * next iteration — confirm callers pre-validate the
			 * address families (sctp_connectx_helper_find does).
			 */
			break;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6317
6318struct sctp_tcb *
6319sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6320    int *totaddr, int *num_v4, int *num_v6, int *error,
6321    int limit, int *bad_addr)
6322{
6323	struct sockaddr *sa;
6324	struct sctp_tcb *stcb = NULL;
6325	size_t incr, at, i;
6326
6327	at = incr = 0;
6328	sa = addr;
6329
6330	*error = *num_v6 = *num_v4 = 0;
6331	/* account and validate addresses */
6332	for (i = 0; i < (size_t)*totaddr; i++) {
6333		switch (sa->sa_family) {
6334#ifdef INET
6335		case AF_INET:
6336			(*num_v4) += 1;
6337			incr = sizeof(struct sockaddr_in);
6338			if (sa->sa_len != incr) {
6339				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6340				*error = EINVAL;
6341				*bad_addr = 1;
6342				return (NULL);
6343			}
6344			break;
6345#endif
6346#ifdef INET6
6347		case AF_INET6:
6348			{
6349				struct sockaddr_in6 *sin6;
6350
6351				sin6 = (struct sockaddr_in6 *)sa;
6352				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6353					/* Must be non-mapped for connectx */
6354					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6355					*error = EINVAL;
6356					*bad_addr = 1;
6357					return (NULL);
6358				}
6359				(*num_v6) += 1;
6360				incr = sizeof(struct sockaddr_in6);
6361				if (sa->sa_len != incr) {
6362					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6363					*error = EINVAL;
6364					*bad_addr = 1;
6365					return (NULL);
6366				}
6367				break;
6368			}
6369#endif
6370		default:
6371			*totaddr = i;
6372			/* we are done */
6373			break;
6374		}
6375		if (i == (size_t)*totaddr) {
6376			break;
6377		}
6378		SCTP_INP_INCR_REF(inp);
6379		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6380		if (stcb != NULL) {
6381			/* Already have or am bring up an association */
6382			return (stcb);
6383		} else {
6384			SCTP_INP_DECR_REF(inp);
6385		}
6386		if ((at + incr) > (size_t)limit) {
6387			*totaddr = i;
6388			break;
6389		}
6390		sa = (struct sockaddr *)((caddr_t)sa + incr);
6391	}
6392	return ((struct sctp_tcb *)NULL);
6393}
6394
6395/*
6396 * sctp_bindx(ADD) for one address.
6397 * assumes all arguments are valid/checked by caller.
6398 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional address 'sa' to a subset-bound endpoint.
	 * On failure *error is set (EINVAL, EADDRINUSE, or whatever
	 * sctp_inpcb_bind()/sctp_addr_mgmt_ep_sa() return); on success
	 * *error is left untouched.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* local storage for a v4-mapped address converted to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping and bind the plain v4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound at all: this is a plain bind() */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether another endpoint already owns addr:port */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* free: add it via address management (port reset) */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* owned by a different endpoint */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6522
6523/*
6524 * sctp_bindx(DELETE) for one address.
6525 * assumes all arguments are valid/checked by caller.
6526 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one bound address 'sa' from a subset-bound endpoint.
	 * On failure *error is set (EINVAL, or the return of
	 * sctp_addr_mgmt_ep_sa()); on success *error is left untouched.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* local storage for a v4-mapped address converted to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping and delete the plain v4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6607
6608/*
6609 * returns the valid local address count for an assoc, taking into account
6610 * all scoping rules
6611 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Return the number of valid local addresses for the association,
	 * taking into account all scoping rules (loopback, IPv4 private,
	 * IPv6 link-local/site-local) and restricted-address filtering.
	 */
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link-local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6749
6750#if defined(SCTP_LOCAL_TRACE_BUF)
6751
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append one entry to the global circular trace buffer.  A slot
	 * index is reserved lock-free with a CAS loop so concurrent
	 * tracers never claim the same entry; the entry fields themselves
	 * are then filled without further synchronization.
	 */
	uint32_t saveindex, newindex;

	/* reserve the next slot; wrap to 1 when the buffer is full */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* the wrap case writes into slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6777
6778#endif
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	/*
	 * Tunneling callback for UDP-encapsulated SCTP (registered via
	 * udp_set_kernel_tunneling).  Strips the UDP header from the
	 * packet, fixes up the IP/IPv6 length field, and hands the result
	 * to the normal SCTP input path together with the UDP source
	 * port (kept in network byte order).  Consumes 'm' in all cases.
	 */
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* shrink ip_len to account for the removed UDP header */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* shrink ip6_plen to account for the removed UDP header */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6850
void
sctp_over_udp_stop(void)
{
	/*
	 * Close and forget the IPv4/IPv6 UDP tunneling sockets, if open.
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
#ifdef INET
	struct socket *tun4;

	tun4 = SCTP_BASE_INFO(udp4_tun_socket);
	if (tun4 != NULL) {
		soclose(tun4);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	struct socket *tun6;

	tun6 = SCTP_BASE_INFO(udp6_tun_socket);
	if (tun6 != NULL) {
		soclose(tun6);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6871
int
sctp_over_udp_start(void)
{
	/*
	 * Create, hook, and bind the kernel UDP tunneling sockets used for
	 * UDP-encapsulated SCTP on the configured tunneling port.  Returns
	 * 0 on success or an errno value; on any failure all partially
	 * created sockets are torn down via sctp_over_udp_stop().
	 */
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the IPv4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Create the IPv6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
6957