sctputil.c revision 270359
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/netinet/sctputil.c 270359 2014-08-22 20:05:09Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/udp.h>
53#include <netinet/udp_var.h>
54#include <sys/proc.h>
55
56
57#ifndef KTR_SCTP
58#define KTR_SCTP KTR_SUBSYS
59#endif
60
61extern struct sctp_cc_functions sctp_cc_functions[];
62extern struct sctp_ss_functions sctp_ss_functions[];
63
64void
65sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66{
67	struct sctp_cwnd_log sctp_clog;
68
69	sctp_clog.x.sb.stcb = stcb;
70	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71	if (stcb)
72		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73	else
74		sctp_clog.x.sb.stcb_sbcc = 0;
75	sctp_clog.x.sb.incr = incr;
76	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77	    SCTP_LOG_EVENT_SB,
78	    from,
79	    sctp_clog.x.misc.log1,
80	    sctp_clog.x.misc.log2,
81	    sctp_clog.x.misc.log3,
82	    sctp_clog.x.misc.log4);
83}
84
85void
86sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87{
88	struct sctp_cwnd_log sctp_clog;
89
90	sctp_clog.x.close.inp = (void *)inp;
91	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92	if (stcb) {
93		sctp_clog.x.close.stcb = (void *)stcb;
94		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95	} else {
96		sctp_clog.x.close.stcb = 0;
97		sctp_clog.x.close.state = 0;
98	}
99	sctp_clog.x.close.loc = loc;
100	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101	    SCTP_LOG_EVENT_CLOSE,
102	    0,
103	    sctp_clog.x.misc.log1,
104	    sctp_clog.x.misc.log2,
105	    sctp_clog.x.misc.log3,
106	    sctp_clog.x.misc.log4);
107}
108
109void
110rto_logging(struct sctp_nets *net, int from)
111{
112	struct sctp_cwnd_log sctp_clog;
113
114	memset(&sctp_clog, 0, sizeof(sctp_clog));
115	sctp_clog.x.rto.net = (void *)net;
116	sctp_clog.x.rto.rtt = net->rtt / 1000;
117	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118	    SCTP_LOG_EVENT_RTT,
119	    from,
120	    sctp_clog.x.misc.log1,
121	    sctp_clog.x.misc.log2,
122	    sctp_clog.x.misc.log3,
123	    sctp_clog.x.misc.log4);
124}
125
126void
127sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128{
129	struct sctp_cwnd_log sctp_clog;
130
131	sctp_clog.x.strlog.stcb = stcb;
132	sctp_clog.x.strlog.n_tsn = tsn;
133	sctp_clog.x.strlog.n_sseq = sseq;
134	sctp_clog.x.strlog.e_tsn = 0;
135	sctp_clog.x.strlog.e_sseq = 0;
136	sctp_clog.x.strlog.strm = stream;
137	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138	    SCTP_LOG_EVENT_STRM,
139	    from,
140	    sctp_clog.x.misc.log1,
141	    sctp_clog.x.misc.log2,
142	    sctp_clog.x.misc.log3,
143	    sctp_clog.x.misc.log4);
144}
145
146void
147sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148{
149	struct sctp_cwnd_log sctp_clog;
150
151	sctp_clog.x.nagle.stcb = (void *)stcb;
152	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157	    SCTP_LOG_EVENT_NAGLE,
158	    action,
159	    sctp_clog.x.misc.log1,
160	    sctp_clog.x.misc.log2,
161	    sctp_clog.x.misc.log3,
162	    sctp_clog.x.misc.log4);
163}
164
165void
166sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167{
168	struct sctp_cwnd_log sctp_clog;
169
170	sctp_clog.x.sack.cumack = cumack;
171	sctp_clog.x.sack.oldcumack = old_cumack;
172	sctp_clog.x.sack.tsn = tsn;
173	sctp_clog.x.sack.numGaps = gaps;
174	sctp_clog.x.sack.numDups = dups;
175	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176	    SCTP_LOG_EVENT_SACK,
177	    from,
178	    sctp_clog.x.misc.log1,
179	    sctp_clog.x.misc.log2,
180	    sctp_clog.x.misc.log3,
181	    sctp_clog.x.misc.log4);
182}
183
184void
185sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186{
187	struct sctp_cwnd_log sctp_clog;
188
189	memset(&sctp_clog, 0, sizeof(sctp_clog));
190	sctp_clog.x.map.base = map;
191	sctp_clog.x.map.cum = cum;
192	sctp_clog.x.map.high = high;
193	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194	    SCTP_LOG_EVENT_MAP,
195	    from,
196	    sctp_clog.x.misc.log1,
197	    sctp_clog.x.misc.log2,
198	    sctp_clog.x.misc.log3,
199	    sctp_clog.x.misc.log4);
200}
201
202void
203sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204{
205	struct sctp_cwnd_log sctp_clog;
206
207	memset(&sctp_clog, 0, sizeof(sctp_clog));
208	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210	sctp_clog.x.fr.tsn = tsn;
211	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212	    SCTP_LOG_EVENT_FR,
213	    from,
214	    sctp_clog.x.misc.log1,
215	    sctp_clog.x.misc.log2,
216	    sctp_clog.x.misc.log3,
217	    sctp_clog.x.misc.log4);
218}
219
220void
221sctp_log_mb(struct mbuf *m, int from)
222{
223	struct sctp_cwnd_log sctp_clog;
224
225	sctp_clog.x.mb.mp = m;
226	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
227	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
228	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
229	if (SCTP_BUF_IS_EXTENDED(m)) {
230		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
231		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
232	} else {
233		sctp_clog.x.mb.ext = 0;
234		sctp_clog.x.mb.refcnt = 0;
235	}
236	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
237	    SCTP_LOG_EVENT_MBUF,
238	    from,
239	    sctp_clog.x.misc.log1,
240	    sctp_clog.x.misc.log2,
241	    sctp_clog.x.misc.log3,
242	    sctp_clog.x.misc.log4);
243}
244
245void
246sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
247{
248	struct sctp_cwnd_log sctp_clog;
249
250	if (control == NULL) {
251		SCTP_PRINTF("Gak log of NULL?\n");
252		return;
253	}
254	sctp_clog.x.strlog.stcb = control->stcb;
255	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257	sctp_clog.x.strlog.strm = control->sinfo_stream;
258	if (poschk != NULL) {
259		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261	} else {
262		sctp_clog.x.strlog.e_tsn = 0;
263		sctp_clog.x.strlog.e_sseq = 0;
264	}
265	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266	    SCTP_LOG_EVENT_STRM,
267	    from,
268	    sctp_clog.x.misc.log1,
269	    sctp_clog.x.misc.log2,
270	    sctp_clog.x.misc.log3,
271	    sctp_clog.x.misc.log4);
272}
273
274void
275sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276{
277	struct sctp_cwnd_log sctp_clog;
278
279	sctp_clog.x.cwnd.net = net;
280	if (stcb->asoc.send_queue_cnt > 255)
281		sctp_clog.x.cwnd.cnt_in_send = 255;
282	else
283		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284	if (stcb->asoc.stream_queue_cnt > 255)
285		sctp_clog.x.cwnd.cnt_in_str = 255;
286	else
287		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288
289	if (net) {
290		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291		sctp_clog.x.cwnd.inflight = net->flight_size;
292		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295	}
296	if (SCTP_CWNDLOG_PRESEND == from) {
297		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298	}
299	sctp_clog.x.cwnd.cwnd_augment = augment;
300	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301	    SCTP_LOG_EVENT_CWND,
302	    from,
303	    sctp_clog.x.misc.log1,
304	    sctp_clog.x.misc.log2,
305	    sctp_clog.x.misc.log3,
306	    sctp_clog.x.misc.log4);
307}
308
/*
 * Trace the ownership state of every lock relevant to an endpoint and
 * association (KTR_SCTP): TCB lock, INP lock, create lock, the global
 * info rwlock, and the socket buffer mutexes.  Any of inp/stcb may be
 * NULL; unknown states log as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint table lock: write-owned by curthread? */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both probe
		 * so_rcv.sb_mtx — on FreeBSD the socket lock is the receive
		 * buffer mutex, so this looks intentional; confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
352
353void
354sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
355{
356	struct sctp_cwnd_log sctp_clog;
357
358	memset(&sctp_clog, 0, sizeof(sctp_clog));
359	sctp_clog.x.cwnd.net = net;
360	sctp_clog.x.cwnd.cwnd_new_value = error;
361	sctp_clog.x.cwnd.inflight = net->flight_size;
362	sctp_clog.x.cwnd.cwnd_augment = burst;
363	if (stcb->asoc.send_queue_cnt > 255)
364		sctp_clog.x.cwnd.cnt_in_send = 255;
365	else
366		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
367	if (stcb->asoc.stream_queue_cnt > 255)
368		sctp_clog.x.cwnd.cnt_in_str = 255;
369	else
370		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
371	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
372	    SCTP_LOG_EVENT_MAXBURST,
373	    from,
374	    sctp_clog.x.misc.log1,
375	    sctp_clog.x.misc.log2,
376	    sctp_clog.x.misc.log3,
377	    sctp_clog.x.misc.log4);
378}
379
380void
381sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382{
383	struct sctp_cwnd_log sctp_clog;
384
385	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386	sctp_clog.x.rwnd.send_size = snd_size;
387	sctp_clog.x.rwnd.overhead = overhead;
388	sctp_clog.x.rwnd.new_rwnd = 0;
389	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390	    SCTP_LOG_EVENT_RWND,
391	    from,
392	    sctp_clog.x.misc.log1,
393	    sctp_clog.x.misc.log2,
394	    sctp_clog.x.misc.log3,
395	    sctp_clog.x.misc.log4);
396}
397
398void
399sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400{
401	struct sctp_cwnd_log sctp_clog;
402
403	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404	sctp_clog.x.rwnd.send_size = flight_size;
405	sctp_clog.x.rwnd.overhead = overhead;
406	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408	    SCTP_LOG_EVENT_RWND,
409	    from,
410	    sctp_clog.x.misc.log1,
411	    sctp_clog.x.misc.log2,
412	    sctp_clog.x.misc.log3,
413	    sctp_clog.x.misc.log4);
414}
415
416void
417sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418{
419	struct sctp_cwnd_log sctp_clog;
420
421	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422	sctp_clog.x.mbcnt.size_change = book;
423	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426	    SCTP_LOG_EVENT_MBCNT,
427	    from,
428	    sctp_clog.x.misc.log1,
429	    sctp_clog.x.misc.log2,
430	    sctp_clog.x.misc.log3,
431	    sctp_clog.x.misc.log4);
432}
433
/*
 * Emit four caller-chosen 32-bit values straight into the KTR_SCTP trace
 * under SCTP_LOG_MISC_EVENT; no sctp_cwnd_log packing involved.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442
443void
444sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445{
446	struct sctp_cwnd_log sctp_clog;
447
448	sctp_clog.x.wake.stcb = (void *)stcb;
449	sctp_clog.x.wake.wake_cnt = wake_cnt;
450	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453
454	if (stcb->asoc.stream_queue_cnt < 0xff)
455		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456	else
457		sctp_clog.x.wake.stream_qcnt = 0xff;
458
459	if (stcb->asoc.chunks_on_out_queue < 0xff)
460		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461	else
462		sctp_clog.x.wake.chunks_on_oque = 0xff;
463
464	sctp_clog.x.wake.sctpflags = 0;
465	/* set in the defered mode stuff */
466	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467		sctp_clog.x.wake.sctpflags |= 1;
468	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469		sctp_clog.x.wake.sctpflags |= 2;
470	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471		sctp_clog.x.wake.sctpflags |= 4;
472	/* what about the sb */
473	if (stcb->sctp_socket) {
474		struct socket *so = stcb->sctp_socket;
475
476		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477	} else {
478		sctp_clog.x.wake.sbflags = 0xff;
479	}
480	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481	    SCTP_LOG_EVENT_WAKE,
482	    from,
483	    sctp_clog.x.misc.log1,
484	    sctp_clog.x.misc.log2,
485	    sctp_clog.x.misc.log3,
486	    sctp_clog.x.misc.log4);
487}
488
489void
490sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491{
492	struct sctp_cwnd_log sctp_clog;
493
494	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500	sctp_clog.x.blk.sndlen = sendlen;
501	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502	    SCTP_LOG_EVENT_BLOCK,
503	    from,
504	    sctp_clog.x.misc.log1,
505	    sctp_clog.x.misc.log2,
506	    sctp_clog.x.misc.log3,
507	    sctp_clog.x.misc.log4);
508}
509
/*
 * Stub for the getsockopt stat-log fill; KTR traces are retrieved with
 * ktrdump instead, so this always succeeds without touching optval.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516
517#ifdef SCTP_AUDITING_ENABLED
518uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
519static int sctp_audit_indx = 0;
520
521static
522void
523sctp_print_audit_report(void)
524{
525	int i;
526	int cnt;
527
528	cnt = 0;
529	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530		if ((sctp_audit_data[i][0] == 0xe0) &&
531		    (sctp_audit_data[i][1] == 0x01)) {
532			cnt = 0;
533			SCTP_PRINTF("\n");
534		} else if (sctp_audit_data[i][0] == 0xf0) {
535			cnt = 0;
536			SCTP_PRINTF("\n");
537		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538		    (sctp_audit_data[i][1] == 0x01)) {
539			SCTP_PRINTF("\n");
540			cnt = 0;
541		}
542		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543		    (uint32_t) sctp_audit_data[i][1]);
544		cnt++;
545		if ((cnt % 14) == 0)
546			SCTP_PRINTF("\n");
547	}
548	for (i = 0; i < sctp_audit_indx; i++) {
549		if ((sctp_audit_data[i][0] == 0xe0) &&
550		    (sctp_audit_data[i][1] == 0x01)) {
551			cnt = 0;
552			SCTP_PRINTF("\n");
553		} else if (sctp_audit_data[i][0] == 0xf0) {
554			cnt = 0;
555			SCTP_PRINTF("\n");
556		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557		    (sctp_audit_data[i][1] == 0x01)) {
558			SCTP_PRINTF("\n");
559			cnt = 0;
560		}
561		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562		    (uint32_t) sctp_audit_data[i][1]);
563		cnt++;
564		if ((cnt % 14) == 0)
565			SCTP_PRINTF("\n");
566	}
567	SCTP_PRINTF("\n");
568}
569
/*
 * Consistency audit of an association's retransmission bookkeeping.
 * Cross-checks three invariants against the sent queue:
 *   1. asoc.sent_queue_retran_cnt  == chunks marked SCTP_DATAGRAM_RESEND
 *   2. asoc.total_flight           == sum of book_size of in-flight chunks
 *   3. asoc.total_flight_count     == number of in-flight chunks
 * and verifies that the per-net flight_size values sum to total_flight.
 * Any mismatch is logged, *corrected in place*, and triggers a dump of
 * the audit ring.  Progress markers are appended to sctp_audit_data as
 * it goes (0xAA=entry, 0xAF=error/early-out, 0xA1/0xA2=retran counts).
 * NOTE(review): the 'net' parameter is unused in this body.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Entry marker: 0xAA plus the caller's location code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: bailed out, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: bailed out, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the association's current retransmit count. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue once, tallying retransmit-marked chunks and
	 * (separately) the booked bytes/count of chunks still in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* Invariant 1 violated: log, report, and correct. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* Invariant 2 violated: correct total_flight. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* Invariant 3 violated: correct total_flight_count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now check the per-destination flight sizes against the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			/* Recompute this net's flight from the sent queue. */
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* At least one mismatch: dump the audit ring. */
		sctp_print_audit_report();
	}
}
699
700void
701sctp_audit_log(uint8_t ev, uint8_t fd)
702{
703
704	sctp_audit_data[sctp_audit_indx][0] = ev;
705	sctp_audit_data[sctp_audit_indx][1] = fd;
706	sctp_audit_indx++;
707	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708		sctp_audit_indx = 0;
709	}
710}
711
712#endif
713
714/*
715 * sctp_stop_timers_for_shutdown() should be called
716 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717 * state to make sure that all timers are stopped.
718 */
/*
 * Stop every per-association and per-destination timer that must not run
 * once the association enters SHUTDOWN_SENT or SHUTDOWN_ACK_SENT: delayed
 * ack, stream reset, ASCONF, autoclose, delayed events, and each net's
 * path-MTU and heartbeat timers.  Return values are ignored — a timer
 * that was not running is fine.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
737
738/*
739 * a list of sizes based on typical mtu's, used only if next hop size not
740 * returned.
741 */
742static uint32_t sctp_mtu_sizes[] = {
743	68,
744	296,
745	508,
746	512,
747	544,
748	576,
749	1006,
750	1492,
751	1500,
752	1536,
753	2002,
754	2048,
755	4352,
756	4464,
757	8166,
758	17914,
759	32000,
760	65535
761};
762
763/*
764 * Return the largest MTU smaller than val. If there is no
765 * entry, just return val.
766 */
767uint32_t
768sctp_get_prev_mtu(uint32_t val)
769{
770	uint32_t i;
771
772	if (val <= sctp_mtu_sizes[0]) {
773		return (val);
774	}
775	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
776		if (val <= sctp_mtu_sizes[i]) {
777			break;
778		}
779	}
780	return (sctp_mtu_sizes[i - 1]);
781}
782
783/*
784 * Return the smallest MTU larger than val. If there is no
785 * entry, just return val.
786 */
787uint32_t
788sctp_get_next_mtu(uint32_t val)
789{
790	/* select another MTU that is just bigger than this one */
791	uint32_t i;
792
793	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
794		if (val < sctp_mtu_sizes[i]) {
795			return (sctp_mtu_sizes[i]);
796		}
797	}
798	return (val);
799}
800
/*
 * Refill m->random_store by hashing the endpoint's seed material with a
 * monotonically increasing counter, then reset the consumption offset so
 * sctp_select_initial_TSN() starts handing out the fresh words.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
819
/*
 * Hand out the next 32-bit pseudo-random word from the endpoint's random
 * store.  A lock-free compare-and-set claims a 4-byte slot; on CAS
 * failure another thread won the slot and we retry.  When the store
 * wraps, it is refilled first.  If initial_sequence_debug is set, a
 * simple incrementing counter is returned instead (deterministic debug
 * mode).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the last partial word of the signature-sized store. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): uint8_t* -> uint32_t* pun; store_at is always a
	 * multiple of 4, but random_store's base alignment is assumed
	 * adequate — confirm, or this is a potential unaligned access.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
857
858uint32_t
859sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860{
861	uint32_t x;
862	struct timeval now;
863
864	if (check) {
865		(void)SCTP_GETTIME_TIMEVAL(&now);
866	}
867	for (;;) {
868		x = sctp_select_initial_TSN(&inp->sctp_ep);
869		if (x == 0) {
870			/* we never use 0 */
871			continue;
872		}
873		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874			break;
875		}
876	}
877	return (x);
878}
879
880int
881sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
882    uint32_t override_tag, uint32_t vrf_id)
883{
884	struct sctp_association *asoc;
885
886	/*
887	 * Anything set to zero is taken care of by the allocation routine's
888	 * bzero
889	 */
890
891	/*
892	 * Up front select what scoping to apply on addresses I tell my peer
893	 * Not sure what to do with these right now, we will need to come up
894	 * with a way to set them. We may need to pass them through from the
895	 * caller in the sctp_aloc_assoc() function.
896	 */
897	int i;
898
899	asoc = &stcb->asoc;
900	/* init all variables to a known value. */
901	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902	asoc->max_burst = inp->sctp_ep.max_burst;
903	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
904	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
906	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
907	asoc->ecn_supported = inp->ecn_supported;
908	asoc->prsctp_supported = inp->prsctp_supported;
909	asoc->nrsack_supported = inp->nrsack_supported;
910	asoc->sctp_cmt_pf = (uint8_t) 0;
911	asoc->sctp_frag_point = inp->sctp_frag_point;
912	asoc->sctp_features = inp->sctp_features;
913	asoc->default_dscp = inp->sctp_ep.default_dscp;
914#ifdef INET6
915	if (inp->sctp_ep.default_flowlabel) {
916		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
917	} else {
918		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
919			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
920			asoc->default_flowlabel &= 0x000fffff;
921			asoc->default_flowlabel |= 0x80000000;
922		} else {
923			asoc->default_flowlabel = 0;
924		}
925	}
926#endif
927	asoc->sb_send_resv = 0;
928	if (override_tag) {
929		asoc->my_vtag = override_tag;
930	} else {
931		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
932	}
933	/* Get the nonce tags */
934	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
935	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
936	asoc->vrf_id = vrf_id;
937
938#ifdef SCTP_ASOCLOG_OF_TSNS
939	asoc->tsn_in_at = 0;
940	asoc->tsn_out_at = 0;
941	asoc->tsn_in_wrapped = 0;
942	asoc->tsn_out_wrapped = 0;
943	asoc->cumack_log_at = 0;
944	asoc->cumack_log_atsnt = 0;
945#endif
946#ifdef SCTP_FS_SPEC_LOG
947	asoc->fs_index = 0;
948#endif
949	asoc->refcnt = 0;
950	asoc->assoc_up_sent = 0;
951	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
952	    sctp_select_initial_TSN(&inp->sctp_ep);
953	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
954	/* we are optimisitic here */
955	asoc->peer_supports_pktdrop = 1;
956	asoc->peer_supports_nat = 0;
957	asoc->sent_queue_retran_cnt = 0;
958
959	/* for CMT */
960	asoc->last_net_cmt_send_started = NULL;
961
962	/* This will need to be adjusted */
963	asoc->last_acked_seq = asoc->init_seq_number - 1;
964	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
965	asoc->asconf_seq_in = asoc->last_acked_seq;
966
967	/* here we are different, we hold the next one we expect */
968	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
969
970	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
971	asoc->initial_rto = inp->sctp_ep.initial_rto;
972
973	asoc->max_init_times = inp->sctp_ep.max_init_times;
974	asoc->max_send_times = inp->sctp_ep.max_send_times;
975	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
976	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
977	asoc->free_chunk_cnt = 0;
978
979	asoc->iam_blocking = 0;
980	asoc->context = inp->sctp_context;
981	asoc->local_strreset_support = inp->local_strreset_support;
982	asoc->def_send = inp->def_send;
983	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
984	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
985	asoc->pr_sctp_cnt = 0;
986	asoc->total_output_queue_size = 0;
987
988	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
989		asoc->scope.ipv6_addr_legal = 1;
990		if (SCTP_IPV6_V6ONLY(inp) == 0) {
991			asoc->scope.ipv4_addr_legal = 1;
992		} else {
993			asoc->scope.ipv4_addr_legal = 0;
994		}
995	} else {
996		asoc->scope.ipv6_addr_legal = 0;
997		asoc->scope.ipv4_addr_legal = 1;
998	}
999
1000	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1001	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1002
1003	asoc->smallest_mtu = inp->sctp_frag_point;
1004	asoc->minrto = inp->sctp_ep.sctp_minrto;
1005	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1006
1007	asoc->locked_on_sending = NULL;
1008	asoc->stream_locked_on = 0;
1009	asoc->ecn_echo_cnt_onq = 0;
1010	asoc->stream_locked = 0;
1011
1012	asoc->send_sack = 1;
1013
1014	LIST_INIT(&asoc->sctp_restricted_addrs);
1015
1016	TAILQ_INIT(&asoc->nets);
1017	TAILQ_INIT(&asoc->pending_reply_queue);
1018	TAILQ_INIT(&asoc->asconf_ack_sent);
1019	/* Setup to fill the hb random cache at first HB */
1020	asoc->hb_random_idx = 4;
1021
1022	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1023
1024	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1025	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1026
1027	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1028	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1029
1030	/*
1031	 * Now the stream parameters, here we allocate space for all streams
1032	 * that we request by default.
1033	 */
1034	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1035	    inp->sctp_ep.pre_open_stream_count;
1036	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1037	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1038	    SCTP_M_STRMO);
1039	if (asoc->strmout == NULL) {
1040		/* big trouble no memory */
1041		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1042		return (ENOMEM);
1043	}
1044	for (i = 0; i < asoc->streamoutcnt; i++) {
1045		/*
1046		 * inbound side must be set to 0xffff, also NOTE when we get
1047		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1048		 * count (streamoutcnt) but first check if we sent to any of
1049		 * the upper streams that were dropped (if some were). Those
1050		 * that were dropped must be notified to the upper layer as
1051		 * failed to send.
1052		 */
1053		asoc->strmout[i].next_sequence_send = 0x0;
1054		TAILQ_INIT(&asoc->strmout[i].outqueue);
1055		asoc->strmout[i].chunks_on_queues = 0;
1056		asoc->strmout[i].stream_no = i;
1057		asoc->strmout[i].last_msg_incomplete = 0;
1058		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1059	}
1060	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1061
1062	/* Now the mapping array */
1063	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1064	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1065	    SCTP_M_MAP);
1066	if (asoc->mapping_array == NULL) {
1067		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1068		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1069		return (ENOMEM);
1070	}
1071	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1072	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1073	    SCTP_M_MAP);
1074	if (asoc->nr_mapping_array == NULL) {
1075		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1076		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1077		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1078		return (ENOMEM);
1079	}
1080	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1081
1082	/* Now the init of the other outqueues */
1083	TAILQ_INIT(&asoc->free_chunks);
1084	TAILQ_INIT(&asoc->control_send_queue);
1085	TAILQ_INIT(&asoc->asconf_send_queue);
1086	TAILQ_INIT(&asoc->send_queue);
1087	TAILQ_INIT(&asoc->sent_queue);
1088	TAILQ_INIT(&asoc->reasmqueue);
1089	TAILQ_INIT(&asoc->resetHead);
1090	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1091	TAILQ_INIT(&asoc->asconf_queue);
1092	/* authentication fields */
1093	asoc->authinfo.random = NULL;
1094	asoc->authinfo.active_keyid = 0;
1095	asoc->authinfo.assoc_key = NULL;
1096	asoc->authinfo.assoc_keyid = 0;
1097	asoc->authinfo.recv_key = NULL;
1098	asoc->authinfo.recv_keyid = 0;
1099	LIST_INIT(&asoc->shared_keys);
1100	asoc->marked_retrans = 0;
1101	asoc->port = inp->sctp_ep.port;
1102	asoc->timoinit = 0;
1103	asoc->timodata = 0;
1104	asoc->timosack = 0;
1105	asoc->timoshutdown = 0;
1106	asoc->timoheartbeat = 0;
1107	asoc->timocookie = 0;
1108	asoc->timoshutdownack = 0;
1109	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1110	asoc->discontinuity_time = asoc->start_time;
1111	/*
1112	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1113	 * freed later when the association is freed.
1114	 */
1115	return (0);
1116}
1117
1118void
1119sctp_print_mapping_array(struct sctp_association *asoc)
1120{
1121	unsigned int i, limit;
1122
1123	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1124	    asoc->mapping_array_size,
1125	    asoc->mapping_array_base_tsn,
1126	    asoc->cumulative_tsn,
1127	    asoc->highest_tsn_inside_map,
1128	    asoc->highest_tsn_inside_nr_map);
1129	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1130		if (asoc->mapping_array[limit - 1] != 0) {
1131			break;
1132		}
1133	}
1134	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1135	for (i = 0; i < limit; i++) {
1136		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1137	}
1138	if (limit % 16)
1139		SCTP_PRINTF("\n");
1140	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1141		if (asoc->nr_mapping_array[limit - 1]) {
1142			break;
1143		}
1144	}
1145	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1146	for (i = 0; i < limit; i++) {
1147		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1148	}
1149	if (limit % 16)
1150		SCTP_PRINTF("\n");
1151}
1152
1153int
1154sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1155{
1156	/* mapping array needs to grow */
1157	uint8_t *new_array1, *new_array2;
1158	uint32_t new_size;
1159
1160	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1161	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1162	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1163	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1164		/* can't get more, forget it */
1165		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1166		if (new_array1) {
1167			SCTP_FREE(new_array1, SCTP_M_MAP);
1168		}
1169		if (new_array2) {
1170			SCTP_FREE(new_array2, SCTP_M_MAP);
1171		}
1172		return (-1);
1173	}
1174	memset(new_array1, 0, new_size);
1175	memset(new_array2, 0, new_size);
1176	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1177	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1178	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1179	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1180	asoc->mapping_array = new_array1;
1181	asoc->nr_mapping_array = new_array2;
1182	asoc->mapping_array_size = new_size;
1183	return (0);
1184}
1185
1186
/*
 * Walk the endpoints/associations selected by the iterator "it" and run
 * its callbacks: function_inp once per matching endpoint, function_assoc
 * per matching association, function_inp_end when an endpoint is done,
 * and function_atend once the whole walk completes.  The walk runs with
 * the INP-INFO read lock and the iterator lock held; both are briefly
 * dropped every SCTP_ITERATOR_MAX_AT_ONCE associations so other threads
 * can make progress.  "it" is freed before returning.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcount on the stcb and an extra ref on
			 * the inp so neither can go away while all locks
			 * are dropped and re-taken below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * Someone may have asked us to stop while the locks
			 * were dropped; honor the stop flags here.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1334
1335void
1336sctp_iterator_worker(void)
1337{
1338	struct sctp_iterator *it, *nit;
1339
1340	/* This function is called with the WQ lock in place */
1341
1342	sctp_it_ctl.iterator_running = 1;
1343	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1344		sctp_it_ctl.cur_it = it;
1345		/* now lets work on this one */
1346		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1347		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1348		CURVNET_SET(it->vn);
1349		sctp_iterator_work(it);
1350		sctp_it_ctl.cur_it = NULL;
1351		CURVNET_RESTORE();
1352		SCTP_IPI_ITERATOR_WQ_LOCK();
1353		/* sa_ignore FREED_MEMORY */
1354	}
1355	sctp_it_ctl.iterator_running = 0;
1356	return;
1357}
1358
1359
1360static void
1361sctp_handle_addr_wq(void)
1362{
1363	/* deal with the ADDR wq from the rtsock calls */
1364	struct sctp_laddr *wi, *nwi;
1365	struct sctp_asconf_iterator *asc;
1366
1367	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1368	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1369	if (asc == NULL) {
1370		/* Try later, no memory */
1371		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1372		    (struct sctp_inpcb *)NULL,
1373		    (struct sctp_tcb *)NULL,
1374		    (struct sctp_nets *)NULL);
1375		return;
1376	}
1377	LIST_INIT(&asc->list_of_work);
1378	asc->cnt = 0;
1379
1380	SCTP_WQ_ADDR_LOCK();
1381	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1382		LIST_REMOVE(wi, sctp_nxt_addr);
1383		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1384		asc->cnt++;
1385	}
1386	SCTP_WQ_ADDR_UNLOCK();
1387
1388	if (asc->cnt == 0) {
1389		SCTP_FREE(asc, SCTP_M_ASC_IT);
1390	} else {
1391		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1392		    sctp_asconf_iterator_stcb,
1393		    NULL,	/* No ep end for boundall */
1394		    SCTP_PCB_FLAGS_BOUNDALL,
1395		    SCTP_PCB_ANY_FEATURES,
1396		    SCTP_ASOC_ANY_STATE,
1397		    (void *)asc, 0,
1398		    sctp_asconf_iterator_end, NULL, 0);
1399	}
1400}
1401
/*
 * Common callout handler for every SCTP timer.  "t" is really a
 * struct sctp_timer; its ep/tcb/net fields carry the inpcb, tcb and
 * destination net the timer was armed for (any may be NULL depending
 * on the timer type).  The handler validates the timer (stale self
 * pointer, invalid type, rescheduled/inactive callout), takes the
 * needed inp reference and tcb lock, dispatches on tmr->type, and
 * finally drops whatever it acquired.  tmr->stopped_from is updated
 * along the way as a breadcrumb of how far validation progressed.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* tmr->self is set when the timer is armed; a mismatch means stale. */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa003;
	/*
	 * Save the type now; it is used for the debug trace at the end,
	 * after the inp/stcb (and thus tmr) may have been freed.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only a limited set of timer types
		 * is still serviced (those needed to tear things down).
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Hold the assoc while we validate its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* Record which timeout type fired in stopped_from. */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm only if HB is still enabled for this destination. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			/* Rotate the endpoint's cookie secret keys. */
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		/* Shutdown took too long: abort the association. */
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Timer was rescheduled/inactive; just drop the tcb lock. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* Drop the inp reference taken above. */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1841
1842void
1843sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1844    struct sctp_nets *net)
1845{
1846	uint32_t to_ticks;
1847	struct sctp_timer *tmr;
1848
1849	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1850		return;
1851
1852	tmr = NULL;
1853	if (stcb) {
1854		SCTP_TCB_LOCK_ASSERT(stcb);
1855	}
1856	switch (t_type) {
1857	case SCTP_TIMER_TYPE_ZERO_COPY:
1858		tmr = &inp->sctp_ep.zero_copy_timer;
1859		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1860		break;
1861	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1862		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1863		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1864		break;
1865	case SCTP_TIMER_TYPE_ADDR_WQ:
1866		/* Only 1 tick away :-) */
1867		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1868		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1869		break;
1870	case SCTP_TIMER_TYPE_SEND:
1871		/* Here we use the RTO timer */
1872		{
1873			int rto_val;
1874
1875			if ((stcb == NULL) || (net == NULL)) {
1876				return;
1877			}
1878			tmr = &net->rxt_timer;
1879			if (net->RTO == 0) {
1880				rto_val = stcb->asoc.initial_rto;
1881			} else {
1882				rto_val = net->RTO;
1883			}
1884			to_ticks = MSEC_TO_TICKS(rto_val);
1885		}
1886		break;
1887	case SCTP_TIMER_TYPE_INIT:
1888		/*
1889		 * Here we use the INIT timer default usually about 1
1890		 * minute.
1891		 */
1892		if ((stcb == NULL) || (net == NULL)) {
1893			return;
1894		}
1895		tmr = &net->rxt_timer;
1896		if (net->RTO == 0) {
1897			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1898		} else {
1899			to_ticks = MSEC_TO_TICKS(net->RTO);
1900		}
1901		break;
1902	case SCTP_TIMER_TYPE_RECV:
1903		/*
1904		 * Here we use the Delayed-Ack timer value from the inp
1905		 * ususually about 200ms.
1906		 */
1907		if (stcb == NULL) {
1908			return;
1909		}
1910		tmr = &stcb->asoc.dack_timer;
1911		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1912		break;
1913	case SCTP_TIMER_TYPE_SHUTDOWN:
1914		/* Here we use the RTO of the destination. */
1915		if ((stcb == NULL) || (net == NULL)) {
1916			return;
1917		}
1918		if (net->RTO == 0) {
1919			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1920		} else {
1921			to_ticks = MSEC_TO_TICKS(net->RTO);
1922		}
1923		tmr = &net->rxt_timer;
1924		break;
1925	case SCTP_TIMER_TYPE_HEARTBEAT:
1926		/*
1927		 * the net is used here so that we can add in the RTO. Even
1928		 * though we use a different timer. We also add the HB timer
1929		 * PLUS a random jitter.
1930		 */
1931		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1932			return;
1933		} else {
1934			uint32_t rndval;
1935			uint32_t jitter;
1936
1937			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1938			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1939				return;
1940			}
1941			if (net->RTO == 0) {
1942				to_ticks = stcb->asoc.initial_rto;
1943			} else {
1944				to_ticks = net->RTO;
1945			}
1946			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1947			jitter = rndval % to_ticks;
1948			if (jitter >= (to_ticks >> 1)) {
1949				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1950			} else {
1951				to_ticks = to_ticks - jitter;
1952			}
1953			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1954			    !(net->dest_state & SCTP_ADDR_PF)) {
1955				to_ticks += net->heart_beat_delay;
1956			}
1957			/*
1958			 * Now we must convert the to_ticks that are now in
1959			 * ms to ticks.
1960			 */
1961			to_ticks = MSEC_TO_TICKS(to_ticks);
1962			tmr = &net->hb_timer;
1963		}
1964		break;
1965	case SCTP_TIMER_TYPE_COOKIE:
1966		/*
1967		 * Here we can use the RTO timer from the network since one
1968		 * RTT was compelete. If a retran happened then we will be
1969		 * using the RTO initial value.
1970		 */
1971		if ((stcb == NULL) || (net == NULL)) {
1972			return;
1973		}
1974		if (net->RTO == 0) {
1975			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1976		} else {
1977			to_ticks = MSEC_TO_TICKS(net->RTO);
1978		}
1979		tmr = &net->rxt_timer;
1980		break;
1981	case SCTP_TIMER_TYPE_NEWCOOKIE:
1982		/*
1983		 * nothing needed but the endpoint here ususually about 60
1984		 * minutes.
1985		 */
1986		if (inp == NULL) {
1987			return;
1988		}
1989		tmr = &inp->sctp_ep.signature_change;
1990		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1991		break;
1992	case SCTP_TIMER_TYPE_ASOCKILL:
1993		if (stcb == NULL) {
1994			return;
1995		}
1996		tmr = &stcb->asoc.strreset_timer;
1997		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1998		break;
1999	case SCTP_TIMER_TYPE_INPKILL:
2000		/*
2001		 * The inp is setup to die. We re-use the signature_chage
2002		 * timer since that has stopped and we are in the GONE
2003		 * state.
2004		 */
2005		if (inp == NULL) {
2006			return;
2007		}
2008		tmr = &inp->sctp_ep.signature_change;
2009		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2010		break;
2011	case SCTP_TIMER_TYPE_PATHMTURAISE:
2012		/*
2013		 * Here we use the value found in the EP for PMTU ususually
2014		 * about 10 minutes.
2015		 */
2016		if ((stcb == NULL) || (inp == NULL)) {
2017			return;
2018		}
2019		if (net == NULL) {
2020			return;
2021		}
2022		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2023			return;
2024		}
2025		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2026		tmr = &net->pmtu_timer;
2027		break;
2028	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2029		/* Here we use the RTO of the destination */
2030		if ((stcb == NULL) || (net == NULL)) {
2031			return;
2032		}
2033		if (net->RTO == 0) {
2034			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2035		} else {
2036			to_ticks = MSEC_TO_TICKS(net->RTO);
2037		}
2038		tmr = &net->rxt_timer;
2039		break;
2040	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2041		/*
2042		 * Here we use the endpoints shutdown guard timer usually
2043		 * about 3 minutes.
2044		 */
2045		if ((inp == NULL) || (stcb == NULL)) {
2046			return;
2047		}
2048		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2049		tmr = &stcb->asoc.shut_guard_timer;
2050		break;
2051	case SCTP_TIMER_TYPE_STRRESET:
2052		/*
2053		 * Here the timer comes from the stcb but its value is from
2054		 * the net's RTO.
2055		 */
2056		if ((stcb == NULL) || (net == NULL)) {
2057			return;
2058		}
2059		if (net->RTO == 0) {
2060			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2061		} else {
2062			to_ticks = MSEC_TO_TICKS(net->RTO);
2063		}
2064		tmr = &stcb->asoc.strreset_timer;
2065		break;
2066	case SCTP_TIMER_TYPE_ASCONF:
2067		/*
2068		 * Here the timer comes from the stcb but its value is from
2069		 * the net's RTO.
2070		 */
2071		if ((stcb == NULL) || (net == NULL)) {
2072			return;
2073		}
2074		if (net->RTO == 0) {
2075			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2076		} else {
2077			to_ticks = MSEC_TO_TICKS(net->RTO);
2078		}
2079		tmr = &stcb->asoc.asconf_timer;
2080		break;
2081	case SCTP_TIMER_TYPE_PRIM_DELETED:
2082		if ((stcb == NULL) || (net != NULL)) {
2083			return;
2084		}
2085		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2086		tmr = &stcb->asoc.delete_prim_timer;
2087		break;
2088	case SCTP_TIMER_TYPE_AUTOCLOSE:
2089		if (stcb == NULL) {
2090			return;
2091		}
2092		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2093			/*
2094			 * Really an error since stcb is NOT set to
2095			 * autoclose
2096			 */
2097			return;
2098		}
2099		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2100		tmr = &stcb->asoc.autoclose_timer;
2101		break;
2102	default:
2103		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2104		    __FUNCTION__, t_type);
2105		return;
2106		break;
2107	}
2108	if ((to_ticks <= 0) || (tmr == NULL)) {
2109		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2110		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2111		return;
2112	}
2113	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2114		/*
2115		 * we do NOT allow you to have it already running. if it is
2116		 * we leave the current one up unchanged
2117		 */
2118		return;
2119	}
2120	/* At this point we can proceed */
2121	if (t_type == SCTP_TIMER_TYPE_SEND) {
2122		stcb->asoc.num_send_timers_up++;
2123	}
2124	tmr->stopped_from = 0;
2125	tmr->type = t_type;
2126	tmr->ep = (void *)inp;
2127	tmr->tcb = (void *)stcb;
2128	tmr->net = (void *)net;
2129	tmr->self = (void *)tmr;
2130	tmr->vnet = (void *)curvnet;
2131	tmr->ticks = sctp_get_tick_count();
2132	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2133	return;
2134}
2135
/*
 * Stop (cancel) the timer of kind t_type.  The timer structure lives in
 * different objects depending on its kind (endpoint, association, or
 * destination net); the switch below selects the right one and returns
 * silently if a required object pointer is missing.  'from' records the
 * caller's location in tmr->stopped_from for post-mortem debugging.
 * The TCB lock must be held when stcb is non-NULL.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* All timer kinds except ADDR_WQ hang off an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the count of running SEND timers in sync (clamped at 0). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2292
2293uint32_t
2294sctp_calculate_len(struct mbuf *m)
2295{
2296	uint32_t tlen = 0;
2297	struct mbuf *at;
2298
2299	at = m;
2300	while (at) {
2301		tlen += SCTP_BUF_LEN(at);
2302		at = SCTP_BUF_NEXT(at);
2303	}
2304	return (tlen);
2305}
2306
2307void
2308sctp_mtu_size_reset(struct sctp_inpcb *inp,
2309    struct sctp_association *asoc, uint32_t mtu)
2310{
2311	/*
2312	 * Reset the P-MTU size on this association, this involves changing
2313	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2314	 * allow the DF flag to be cleared.
2315	 */
2316	struct sctp_tmit_chunk *chk;
2317	unsigned int eff_mtu, ovh;
2318
2319	asoc->smallest_mtu = mtu;
2320	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2321		ovh = SCTP_MIN_OVERHEAD;
2322	} else {
2323		ovh = SCTP_MIN_V4_OVERHEAD;
2324	}
2325	eff_mtu = mtu - ovh;
2326	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2327		if (chk->send_size > eff_mtu) {
2328			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2329		}
2330	}
2331	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2332		if (chk->send_size > eff_mtu) {
2333			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2334		}
2335	}
2336}
2337
2338
2339/*
2340 * given an association and starting time of the current RTT period return
2341 * RTO in number of msecs net should point to the current network
2342 */
2343
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		/* 'told' may be misaligned; work from a local copy. */
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - old, i.e. the elapsed RTT interval */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Exponentially-weighted update of scaled srtt/rttvar. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt = rtt, rttvar = rtt/2. */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* Never let the variance term fall to zero. */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* Large RTO: treat path as a satellite network. */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* RTO dropped again: lock out further sat detection. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2459
2460/*
2461 * return a pointer to a contiguous piece of data from the given mbuf chain
2462 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2463 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2465 */
2466caddr_t
2467sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2468{
2469	uint32_t count;
2470	uint8_t *ptr;
2471
2472	ptr = in_ptr;
2473	if ((off < 0) || (len <= 0))
2474		return (NULL);
2475
2476	/* find the desired start location */
2477	while ((m != NULL) && (off > 0)) {
2478		if (off < SCTP_BUF_LEN(m))
2479			break;
2480		off -= SCTP_BUF_LEN(m);
2481		m = SCTP_BUF_NEXT(m);
2482	}
2483	if (m == NULL)
2484		return (NULL);
2485
2486	/* is the current mbuf large enough (eg. contiguous)? */
2487	if ((SCTP_BUF_LEN(m) - off) >= len) {
2488		return (mtod(m, caddr_t)+off);
2489	} else {
2490		/* else, it spans more than one mbuf, so save a temp copy... */
2491		while ((m != NULL) && (len > 0)) {
2492			count = min(SCTP_BUF_LEN(m) - off, len);
2493			bcopy(mtod(m, caddr_t)+off, ptr, count);
2494			len -= count;
2495			ptr += count;
2496			off = 0;
2497			m = SCTP_BUF_NEXT(m);
2498		}
2499		if ((m == NULL) && (len > 0))
2500			return (NULL);
2501		else
2502			return ((caddr_t)in_ptr);
2503	}
2504}
2505
2506
2507
2508struct sctp_paramhdr *
2509sctp_get_next_param(struct mbuf *m,
2510    int offset,
2511    struct sctp_paramhdr *pull,
2512    int pull_limit)
2513{
2514	/* This just provides a typed signature to Peter's Pull routine */
2515	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2516	    (uint8_t *) pull));
2517}
2518
2519
2520struct mbuf *
2521sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2522{
2523	struct mbuf *m_last;
2524	caddr_t dp;
2525
2526	if (padlen > 3) {
2527		return (NULL);
2528	}
2529	if (padlen <= M_TRAILINGSPACE(m)) {
2530		/*
2531		 * The easy way. We hope the majority of the time we hit
2532		 * here :)
2533		 */
2534		m_last = m;
2535	} else {
2536		/* Hard way we must grow the mbuf chain */
2537		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2538		if (m_last == NULL) {
2539			return (NULL);
2540		}
2541		SCTP_BUF_LEN(m_last) = 0;
2542		SCTP_BUF_NEXT(m_last) = NULL;
2543		SCTP_BUF_NEXT(m) = m_last;
2544	}
2545	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2546	SCTP_BUF_LEN(m_last) += padlen;
2547	memset(dp, 0, padlen);
2548	return (m_last);
2549}
2550
2551struct mbuf *
2552sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2553{
2554	/* find the last mbuf in chain and pad it */
2555	struct mbuf *m_at;
2556
2557	if (last_mbuf != NULL) {
2558		return (sctp_add_pad_tombuf(last_mbuf, padval));
2559	} else {
2560		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2561			if (SCTP_BUF_NEXT(m_at) == NULL) {
2562				return (sctp_add_pad_tombuf(m_at, padval));
2563			}
2564		}
2565	}
2566	return (NULL);
2567}
2568
/*
 * Queue an SCTP_ASSOC_CHANGE notification (state: COMM_UP, COMM_LOST,
 * RESTART, SHUTDOWN_COMP or CANT_STR_ASSOC) to the socket's receive
 * queue if the user enabled association events.  For COMM_UP/RESTART
 * the supported-features list is appended; for COMM_LOST/CANT_STR_ASSOC
 * the peer's ABORT chunk (if any) is appended.  For 1-to-1 style
 * sockets an error is also set on the socket and sleepers are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Size the notification for the optional trailing info. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One byte per supported feature. */
				i = 0;
				if (stcb->asoc.prsctp_supported) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.peer_supports_auth) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.peer_supports_asconf) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.peer_supports_strreset) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Hand the peer's ABORT chunk up to the user. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer-initiated failure: refused vs. reset. */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Locally-initiated failure: timeout vs. abort. */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2716
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state/error to the socket's receive queue, if the user enabled
 * peer-address events.  For link-local IPv6 addresses the scope id is
 * made user-visible (recovered or the embedded one cleared).
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2795
2796
/*
 * Queue a send-failed notification for a chunk that could not be
 * delivered.  Depending on which user option is enabled, either the
 * newer SCTP_SEND_FAILED_EVENT or the deprecated SCTP_SEND_FAILED
 * layout is used.  The chunk's data mbufs are stolen (chk->data is
 * cleared) and appended to the notification so the user gets the
 * undeliverable payload back.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style SCTP_SEND_FAILED_EVENT notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/* Reported length covers the user payload, not the header. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Deprecated SCTP_SEND_FAILED notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2910
2911
/*
 * Like sctp_notify_send_failed(), but for a stream-queue pending entry
 * (message never made it into a chunk, so it is always DATA_UNSENT).
 * The pending entry's data mbufs are stolen (sp->data is cleared) and
 * appended to the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style SCTP_SEND_FAILED_EVENT notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Deprecated SCTP_SEND_FAILED notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3012
3013
3014
3015static void
3016sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3017{
3018	struct mbuf *m_notify;
3019	struct sctp_adaptation_event *sai;
3020	struct sctp_queued_to_read *control;
3021
3022	if ((stcb == NULL) ||
3023	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3024		/* event not enabled */
3025		return;
3026	}
3027	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3028	if (m_notify == NULL)
3029		/* no space left */
3030		return;
3031	SCTP_BUF_LEN(m_notify) = 0;
3032	sai = mtod(m_notify, struct sctp_adaptation_event *);
3033	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3034	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3035	sai->sai_flags = 0;
3036	sai->sai_length = sizeof(struct sctp_adaptation_event);
3037	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3038	sai->sai_assoc_id = sctp_get_associd(stcb);
3039
3040	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3041	SCTP_BUF_NEXT(m_notify) = NULL;
3042
3043	/* append to socket */
3044	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3045	    0, 0, stcb->asoc.context, 0, 0, 0,
3046	    m_notify);
3047	if (control == NULL) {
3048		/* no memory */
3049		sctp_m_freem(m_notify);
3050		return;
3051	}
3052	control->length = SCTP_BUF_LEN(m_notify);
3053	control->spec_flags = M_NOTIFICATION;
3054	/* not that we need this */
3055	control->tail_mbuf = m_notify;
3056	sctp_add_to_readq(stcb->sctp_ep, stcb,
3057	    control,
3058	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3059}
3060
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  Because the caller
 * already holds the read-queue lock, the entry is linked onto the inp's
 * read_queue by hand (with manual sballoc accounting) instead of going
 * through sctp_add_to_readq(), which would try to take that lock itself.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* 'val' packs the stream id in the upper 16 bits and the stream
	 * sequence number in the lower 16 bits. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	/* NOTE(review): this assignment is dead - control->length is reset to
	 * 0 a few lines below and then rebuilt via atomic_add_int(). */
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Charge the notification against the receive socket buffer. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* Insert right after the partially delivered message so the reader
	 * sees the PD-API event in order. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* On platforms needing the socket lock for sorwakeup(), drop
		 * the TCB lock (holding a ref) to honor lock ordering. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3156
/*
 * Handle notification of a peer SHUTDOWN: for one-to-one style (and
 * connected one-to-many) sockets, mark the socket as unable to send and
 * wake any writer; then, if enabled, queue an SCTP_SHUTDOWN_EVENT
 * notification on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Acquire the socket lock with the TCB lock dropped (holding
		 * a refcount) to respect socket-before-TCB lock ordering. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3225
3226static void
3227sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3228    int so_locked
3229#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3230    SCTP_UNUSED
3231#endif
3232)
3233{
3234	struct mbuf *m_notify;
3235	struct sctp_sender_dry_event *event;
3236	struct sctp_queued_to_read *control;
3237
3238	if ((stcb == NULL) ||
3239	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3240		/* event not enabled */
3241		return;
3242	}
3243	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3244	if (m_notify == NULL) {
3245		/* no space left */
3246		return;
3247	}
3248	SCTP_BUF_LEN(m_notify) = 0;
3249	event = mtod(m_notify, struct sctp_sender_dry_event *);
3250	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3251	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3252	event->sender_dry_flags = 0;
3253	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3254	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3255
3256	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3257	SCTP_BUF_NEXT(m_notify) = NULL;
3258
3259	/* append to socket */
3260	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3261	    0, 0, stcb->asoc.context, 0, 0, 0,
3262	    m_notify);
3263	if (control == NULL) {
3264		/* no memory */
3265		sctp_m_freem(m_notify);
3266		return;
3267	}
3268	control->length = SCTP_BUF_LEN(m_notify);
3269	control->spec_flags = M_NOTIFICATION;
3270	/* not that we need this */
3271	control->tail_mbuf = m_notify;
3272	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3273	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3274}
3275
3276
3277void
3278sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3279{
3280	struct mbuf *m_notify;
3281	struct sctp_queued_to_read *control;
3282	struct sctp_stream_change_event *stradd;
3283
3284	if ((stcb == NULL) ||
3285	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3286		/* event not enabled */
3287		return;
3288	}
3289	if ((stcb->asoc.peer_req_out) && flag) {
3290		/* Peer made the request, don't tell the local user */
3291		stcb->asoc.peer_req_out = 0;
3292		return;
3293	}
3294	stcb->asoc.peer_req_out = 0;
3295	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3296	if (m_notify == NULL)
3297		/* no space left */
3298		return;
3299	SCTP_BUF_LEN(m_notify) = 0;
3300	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3301	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3302	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3303	stradd->strchange_flags = flag;
3304	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3305	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3306	stradd->strchange_instrms = numberin;
3307	stradd->strchange_outstrms = numberout;
3308	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3309	SCTP_BUF_NEXT(m_notify) = NULL;
3310	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3311		/* no space */
3312		sctp_m_freem(m_notify);
3313		return;
3314	}
3315	/* append to socket */
3316	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3317	    0, 0, stcb->asoc.context, 0, 0, 0,
3318	    m_notify);
3319	if (control == NULL) {
3320		/* no memory */
3321		sctp_m_freem(m_notify);
3322		return;
3323	}
3324	control->spec_flags = M_NOTIFICATION;
3325	control->length = SCTP_BUF_LEN(m_notify);
3326	/* not that we need this */
3327	control->tail_mbuf = m_notify;
3328	sctp_add_to_readq(stcb->sctp_ep, stcb,
3329	    control,
3330	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3331}
3332
3333void
3334sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3335{
3336	struct mbuf *m_notify;
3337	struct sctp_queued_to_read *control;
3338	struct sctp_assoc_reset_event *strasoc;
3339
3340	if ((stcb == NULL) ||
3341	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3342		/* event not enabled */
3343		return;
3344	}
3345	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3346	if (m_notify == NULL)
3347		/* no space left */
3348		return;
3349	SCTP_BUF_LEN(m_notify) = 0;
3350	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3351	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3352	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3353	strasoc->assocreset_flags = flag;
3354	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3355	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3356	strasoc->assocreset_local_tsn = sending_tsn;
3357	strasoc->assocreset_remote_tsn = recv_tsn;
3358	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3359	SCTP_BUF_NEXT(m_notify) = NULL;
3360	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3361		/* no space */
3362		sctp_m_freem(m_notify);
3363		return;
3364	}
3365	/* append to socket */
3366	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3367	    0, 0, stcb->asoc.context, 0, 0, 0,
3368	    m_notify);
3369	if (control == NULL) {
3370		/* no memory */
3371		sctp_m_freem(m_notify);
3372		return;
3373	}
3374	control->spec_flags = M_NOTIFICATION;
3375	control->length = SCTP_BUF_LEN(m_notify);
3376	/* not that we need this */
3377	control->tail_mbuf = m_notify;
3378	sctp_add_to_readq(stcb->sctp_ep, stcb,
3379	    control,
3380	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3381}
3382
3383
3384
3385static void
3386sctp_notify_stream_reset(struct sctp_tcb *stcb,
3387    int number_entries, uint16_t * list, int flag)
3388{
3389	struct mbuf *m_notify;
3390	struct sctp_queued_to_read *control;
3391	struct sctp_stream_reset_event *strreset;
3392	int len;
3393
3394	if ((stcb == NULL) ||
3395	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3396		/* event not enabled */
3397		return;
3398	}
3399	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3400	if (m_notify == NULL)
3401		/* no space left */
3402		return;
3403	SCTP_BUF_LEN(m_notify) = 0;
3404	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3405	if (len > M_TRAILINGSPACE(m_notify)) {
3406		/* never enough room */
3407		sctp_m_freem(m_notify);
3408		return;
3409	}
3410	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3411	memset(strreset, 0, len);
3412	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3413	strreset->strreset_flags = flag;
3414	strreset->strreset_length = len;
3415	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3416	if (number_entries) {
3417		int i;
3418
3419		for (i = 0; i < number_entries; i++) {
3420			strreset->strreset_stream_list[i] = ntohs(list[i]);
3421		}
3422	}
3423	SCTP_BUF_LEN(m_notify) = len;
3424	SCTP_BUF_NEXT(m_notify) = NULL;
3425	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3426		/* no space */
3427		sctp_m_freem(m_notify);
3428		return;
3429	}
3430	/* append to socket */
3431	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3432	    0, 0, stcb->asoc.context, 0, 0, 0,
3433	    m_notify);
3434	if (control == NULL) {
3435		/* no memory */
3436		sctp_m_freem(m_notify);
3437		return;
3438	}
3439	control->spec_flags = M_NOTIFICATION;
3440	control->length = SCTP_BUF_LEN(m_notify);
3441	/* not that we need this */
3442	control->tail_mbuf = m_notify;
3443	sctp_add_to_readq(stcb->sctp_ep, stcb,
3444	    control,
3445	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3446}
3447
3448
/*
 * Queue an SCTP_REMOTE_ERROR notification, appending a copy of the peer's
 * ERROR chunk (header included) when one was supplied and memory allows.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		/* Fall back to the bare notification without the chunk copy;
		 * the notif_len > sizeof() test below then skips the memcpy. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	if (notif_len > sizeof(struct sctp_remote_error)) {
		/* Room was allocated for the chunk; copy it in and extend
		 * the reported length accordingly. */
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		sctp_m_freem(m_notify);
	}
}
3505
3506
3507void
3508sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3509    uint32_t error, void *data, int so_locked
3510#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3511    SCTP_UNUSED
3512#endif
3513)
3514{
3515	if ((stcb == NULL) ||
3516	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3517	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3518	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3519		/* If the socket is gone we are out of here */
3520		return;
3521	}
3522	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3523		return;
3524	}
3525	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3526	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3527		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3528		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3529		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3530			/* Don't report these in front states */
3531			return;
3532		}
3533	}
3534	switch (notification) {
3535	case SCTP_NOTIFY_ASSOC_UP:
3536		if (stcb->asoc.assoc_up_sent == 0) {
3537			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3538			stcb->asoc.assoc_up_sent = 1;
3539		}
3540		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3541			sctp_notify_adaptation_layer(stcb);
3542		}
3543		if (stcb->asoc.peer_supports_auth == 0) {
3544			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3545			    NULL, so_locked);
3546		}
3547		break;
3548	case SCTP_NOTIFY_ASSOC_DOWN:
3549		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3550		break;
3551	case SCTP_NOTIFY_INTERFACE_DOWN:
3552		{
3553			struct sctp_nets *net;
3554
3555			net = (struct sctp_nets *)data;
3556			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3557			    (struct sockaddr *)&net->ro._l_addr, error);
3558			break;
3559		}
3560	case SCTP_NOTIFY_INTERFACE_UP:
3561		{
3562			struct sctp_nets *net;
3563
3564			net = (struct sctp_nets *)data;
3565			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3566			    (struct sockaddr *)&net->ro._l_addr, error);
3567			break;
3568		}
3569	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3570		{
3571			struct sctp_nets *net;
3572
3573			net = (struct sctp_nets *)data;
3574			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3575			    (struct sockaddr *)&net->ro._l_addr, error);
3576			break;
3577		}
3578	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3579		sctp_notify_send_failed2(stcb, error,
3580		    (struct sctp_stream_queue_pending *)data, so_locked);
3581		break;
3582	case SCTP_NOTIFY_SENT_DG_FAIL:
3583		sctp_notify_send_failed(stcb, 1, error,
3584		    (struct sctp_tmit_chunk *)data, so_locked);
3585		break;
3586	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3587		sctp_notify_send_failed(stcb, 0, error,
3588		    (struct sctp_tmit_chunk *)data, so_locked);
3589		break;
3590	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3591		{
3592			uint32_t val;
3593
3594			val = *((uint32_t *) data);
3595
3596			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3597			break;
3598		}
3599	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3600		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3601		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3602			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3603		} else {
3604			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3605		}
3606		break;
3607	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3608		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3609		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3610			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3611		} else {
3612			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3613		}
3614		break;
3615	case SCTP_NOTIFY_ASSOC_RESTART:
3616		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3617		if (stcb->asoc.peer_supports_auth == 0) {
3618			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3619			    NULL, so_locked);
3620		}
3621		break;
3622	case SCTP_NOTIFY_STR_RESET_SEND:
3623		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3624		break;
3625	case SCTP_NOTIFY_STR_RESET_RECV:
3626		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3627		break;
3628	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3629		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3630		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3631		break;
3632	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3633		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3634		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3635		break;
3636	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3637		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3638		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3639		break;
3640	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3641		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3642		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3643		break;
3644	case SCTP_NOTIFY_ASCONF_ADD_IP:
3645		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3646		    error);
3647		break;
3648	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3649		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3650		    error);
3651		break;
3652	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3653		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3654		    error);
3655		break;
3656	case SCTP_NOTIFY_PEER_SHUTDOWN:
3657		sctp_notify_shutdown_event(stcb);
3658		break;
3659	case SCTP_NOTIFY_AUTH_NEW_KEY:
3660		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3661		    (uint16_t) (uintptr_t) data,
3662		    so_locked);
3663		break;
3664	case SCTP_NOTIFY_AUTH_FREE_KEY:
3665		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3666		    (uint16_t) (uintptr_t) data,
3667		    so_locked);
3668		break;
3669	case SCTP_NOTIFY_NO_PEER_AUTH:
3670		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3671		    (uint16_t) (uintptr_t) data,
3672		    so_locked);
3673		break;
3674	case SCTP_NOTIFY_SENDER_DRY:
3675		sctp_notify_sender_dry_event(stcb, so_locked);
3676		break;
3677	case SCTP_NOTIFY_REMOTE_ERROR:
3678		sctp_notify_remote_error(stcb, error, data);
3679		break;
3680	default:
3681		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3682		    __FUNCTION__, notification, notification);
3683		break;
3684	}			/* end switch */
3685}
3686
/*
 * Report every chunk still queued for transmission (sent queue, send
 * queue, and each stream's pending-output queue) as failed to the ULP and
 * free it.  'holds_lock' non-zero means the caller already owns the TCB
 * send lock; 'error' is the failure cause passed through to the
 * notifications.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already taken off the per-stream
		 * count, so only decrement for the others. */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* The notify may have stolen the mbuf chain; only
			 * free it if it is still attached. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* Again, the notify may steal sp->data. */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3798
3799void
3800sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3801    struct sctp_abort_chunk *abort, int so_locked
3802#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3803    SCTP_UNUSED
3804#endif
3805)
3806{
3807	if (stcb == NULL) {
3808		return;
3809	}
3810	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3811	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3812	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3813		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3814	}
3815	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3816	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3817	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3818		return;
3819	}
3820	/* Tell them we lost the asoc */
3821	sctp_report_all_outbound(stcb, error, 1, so_locked);
3822	if (from_peer) {
3823		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3824	} else {
3825		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3826	}
3827}
3828
/*
 * Abort an association in response to an incoming packet: notify the ULP
 * (if a TCB exists), send an ABORT back to the peer using the peer's
 * verification tag, and free the TCB.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* With no TCB, vtag stays 0 and the caller-supplied vrf_id is used. */
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Take the socket lock (dropping the TCB lock with a ref
		 * held) before freeing the association. */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations count against currestab. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3877
3878#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN logs.
 * NOTE(review): the body is guarded by NOSIY_PRINTS - an apparent typo of
 * NOISY_PRINTS that is not defined anywhere visible - so this currently
 * compiles to an empty function even with SCTP_ASOCLOG_OF_TSNS enabled.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* The log is a circular buffer: when wrapped, print the older
	 * entries (from tsn_in_at to the end) first. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* Same circular-buffer walk for the outbound log. */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3939
3940#endif
3941
/*
 * Abort an association from the local side: notify the ULP (unless the
 * socket is already gone), send an ABORT chunk to the peer with the given
 * operational error, update statistics, and free the TCB.  With a NULL
 * stcb, only garbage-collects the inp if its socket is gone.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Established associations count against currestab. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* Take the socket lock (dropping the TCB lock with a ref held)
	 * before freeing the association, honoring lock ordering. */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4002
/*
 * Handle an "out of the blue" packet: one that could not be matched to
 * any existing association.  The chunk list decides the response:
 * PACKET-DROPPED, ABORT and SHUTDOWN-COMPLETE get no reply at all,
 * SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and anything else
 * may trigger an ABORT, subject to the sctp_blackhole sysctl below.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* Socket gone and no assocs left: free the PCB. */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/*
	 * sctp_blackhole: 0 = always send the ABORT, 1 = suppress it only
	 * for packets containing an INIT, anything else = stay silent.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}
4068
4069/*
4070 * check the inbound datagram to make sure there is not an abort inside it,
4071 * if there is return 1, else return 0.
4072 */
4073int
4074sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4075{
4076	struct sctp_chunkhdr *ch;
4077	struct sctp_init_chunk *init_chk, chunk_buf;
4078	int offset;
4079	unsigned int chk_length;
4080
4081	offset = iphlen + sizeof(struct sctphdr);
4082	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4083	    (uint8_t *) & chunk_buf);
4084	while (ch != NULL) {
4085		chk_length = ntohs(ch->chunk_length);
4086		if (chk_length < sizeof(*ch)) {
4087			/* packet is probably corrupt */
4088			break;
4089		}
4090		/* we seem to be ok, is it an abort? */
4091		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4092			/* yep, tell them */
4093			return (1);
4094		}
4095		if (ch->chunk_type == SCTP_INITIATION) {
4096			/* need to update the Vtag */
4097			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4098			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4099			if (init_chk != NULL) {
4100				*vtagfill = ntohl(init_chk->init.initiate_tag);
4101			}
4102		}
4103		/* Nope, move to the next chunk */
4104		offset += SCTP_SIZE32(chk_length);
4105		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4106		    sizeof(*ch), (uint8_t *) & chunk_buf);
4107	}
4108	return (0);
4109}
4110
4111/*
4112 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4113 * set (i.e. it's 0) so, create this function to compare link local scopes
4114 */
4115#ifdef INET6
4116uint32_t
4117sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4118{
4119	struct sockaddr_in6 a, b;
4120
4121	/* save copies */
4122	a = *addr1;
4123	b = *addr2;
4124
4125	if (a.sin6_scope_id == 0)
4126		if (sa6_recoverscope(&a)) {
4127			/* can't get scope, so can't match */
4128			return (0);
4129		}
4130	if (b.sin6_scope_id == 0)
4131		if (sa6_recoverscope(&b)) {
4132			/* can't get scope, so can't match */
4133			return (0);
4134		}
4135	if (a.sin6_scope_id != b.sin6_scope_id)
4136		return (0);
4137
4138	return (1);
4139}
4140
4141/*
4142 * returns a sockaddr_in6 with embedded scope recovered and removed
4143 */
4144struct sockaddr_in6 *
4145sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4146{
4147	/* check and strip embedded scope junk */
4148	if (addr->sin6_family == AF_INET6) {
4149		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4150			if (addr->sin6_scope_id == 0) {
4151				*store = *addr;
4152				if (!sa6_recoverscope(store)) {
4153					/* use the recovered scope */
4154					addr = store;
4155				}
4156			} else {
4157				/* else, return the original "to" addr */
4158				in6_clearscope(&addr->sin6_addr);
4159			}
4160		}
4161	}
4162	return (addr);
4163}
4164
4165#endif
4166
4167/*
4168 * are the two addresses the same?  currently a "scopeless" check returns: 1
4169 * if same, 0 if not
4170 */
4171int
4172sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4173{
4174
4175	/* must be valid */
4176	if (sa1 == NULL || sa2 == NULL)
4177		return (0);
4178
4179	/* must be the same family */
4180	if (sa1->sa_family != sa2->sa_family)
4181		return (0);
4182
4183	switch (sa1->sa_family) {
4184#ifdef INET6
4185	case AF_INET6:
4186		{
4187			/* IPv6 addresses */
4188			struct sockaddr_in6 *sin6_1, *sin6_2;
4189
4190			sin6_1 = (struct sockaddr_in6 *)sa1;
4191			sin6_2 = (struct sockaddr_in6 *)sa2;
4192			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4193			    sin6_2));
4194		}
4195#endif
4196#ifdef INET
4197	case AF_INET:
4198		{
4199			/* IPv4 addresses */
4200			struct sockaddr_in *sin_1, *sin_2;
4201
4202			sin_1 = (struct sockaddr_in *)sa1;
4203			sin_2 = (struct sockaddr_in *)sa2;
4204			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4205		}
4206#endif
4207	default:
4208		/* we don't do these... */
4209		return (0);
4210	}
4211}
4212
4213void
4214sctp_print_address(struct sockaddr *sa)
4215{
4216#ifdef INET6
4217	char ip6buf[INET6_ADDRSTRLEN];
4218
4219#endif
4220
4221	switch (sa->sa_family) {
4222#ifdef INET6
4223	case AF_INET6:
4224		{
4225			struct sockaddr_in6 *sin6;
4226
4227			sin6 = (struct sockaddr_in6 *)sa;
4228			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4229			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4230			    ntohs(sin6->sin6_port),
4231			    sin6->sin6_scope_id);
4232			break;
4233		}
4234#endif
4235#ifdef INET
4236	case AF_INET:
4237		{
4238			struct sockaddr_in *sin;
4239			unsigned char *p;
4240
4241			sin = (struct sockaddr_in *)sa;
4242			p = (unsigned char *)&sin->sin_addr;
4243			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4244			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4245			break;
4246		}
4247#endif
4248	default:
4249		SCTP_PRINTF("?\n");
4250		break;
4251	}
4252}
4253
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.  Used by peeloff /
	 * accept: data already queued for the association follows it to
	 * the new socket, with socket-buffer accounting moved as well.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* Keep concurrent readers out of the old receive buffer. */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Debit the old socket buffer for each mbuf moved. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Credit the new socket buffer for each mbuf moved. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4329
/*
 * Queue a completed (or partially delivered, when end == 0) control
 * structure on the endpoint's read queue, charge its mbuf chain to the
 * socket buffer sb, and wake any reader.  inp_read_lock_held / so_locked
 * tell this routine which locks the caller already owns.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side is shut down: drop the data instead of queueing. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are excluded from the receive counters. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Prune zero-length mbufs; charge the rest to the socket buffer. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Finally, wake up anyone sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Drop the TCB lock (holding a ref) to take
				 * the socket lock in the right order.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4455
4456
/*
 * Append mbuf chain m to an existing read-queue entry (partial delivery
 * or reassembly).  Returns 0 on success, -1 when the control is missing,
 * already complete, or m is empty.  end marks message completion;
 * ctls_cumack carries the highest TSN represented by this chunk.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader side shut down: silently succeed without queueing. */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* Prune zero-length mbufs; account the rest against sb (if given). */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Wake up any reader now that data has been appended. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4606
4607
4608
4609/*************HOLD THIS COMMENT FOR PATCH FILE OF
4610 *************ALTERNATE ROUTING CODE
4611 */
4612
4613/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4614 *************ALTERNATE ROUTING CODE
4615 */
4616
4617struct mbuf *
4618sctp_generate_cause(uint16_t code, char *info)
4619{
4620	struct mbuf *m;
4621	struct sctp_gen_error_cause *cause;
4622	size_t info_len, len;
4623
4624	if ((code == 0) || (info == NULL)) {
4625		return (NULL);
4626	}
4627	info_len = strlen(info);
4628	len = sizeof(struct sctp_paramhdr) + info_len;
4629	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4630	if (m != NULL) {
4631		SCTP_BUF_LEN(m) = len;
4632		cause = mtod(m, struct sctp_gen_error_cause *);
4633		cause->code = htons(code);
4634		cause->length = htons((uint16_t) len);
4635		memcpy(cause->info, info, info_len);
4636	}
4637	return (m);
4638}
4639
4640struct mbuf *
4641sctp_generate_no_user_data_cause(uint32_t tsn)
4642{
4643	struct mbuf *m;
4644	struct sctp_error_no_user_data *no_user_data_cause;
4645	size_t len;
4646
4647	len = sizeof(struct sctp_error_no_user_data);
4648	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4649	if (m != NULL) {
4650		SCTP_BUF_LEN(m) = len;
4651		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4652		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4653		no_user_data_cause->cause.length = htons((uint16_t) len);
4654		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4655	}
4656	return (m);
4657}
4658
4659#ifdef SCTP_MBCNT_LOGGING
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/*
	 * Return the buffer space booked to chunk tp1 (counted as chk_cnt
	 * chunks) to the association totals and, for TCP-style sockets,
	 * to the send socket buffer.  No-op when the chunk has no data.
	 */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero rather than letting the counter go negative. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Only 1-to-1 (TCP-pool/TCP-type) sockets track sb_cc here. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4691
4692#endif
4693
4694int
4695sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4696    uint8_t sent, int so_locked
4697#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4698    SCTP_UNUSED
4699#endif
4700)
4701{
4702	struct sctp_stream_out *strq;
4703	struct sctp_tmit_chunk *chk = NULL, *tp2;
4704	struct sctp_stream_queue_pending *sp;
4705	uint16_t stream = 0, seq = 0;
4706	uint8_t foundeom = 0;
4707	int ret_sz = 0;
4708	int notdone;
4709	int do_wakeup_routine = 0;
4710
4711	stream = tp1->rec.data.stream_number;
4712	seq = tp1->rec.data.stream_seq;
4713	do {
4714		ret_sz += tp1->book_size;
4715		if (tp1->data != NULL) {
4716			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4717				sctp_flight_size_decrease(tp1);
4718				sctp_total_flight_decrease(stcb, tp1);
4719			}
4720			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4721			stcb->asoc.peers_rwnd += tp1->send_size;
4722			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4723			if (sent) {
4724				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4725			} else {
4726				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4727			}
4728			if (tp1->data) {
4729				sctp_m_freem(tp1->data);
4730				tp1->data = NULL;
4731			}
4732			do_wakeup_routine = 1;
4733			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4734				stcb->asoc.sent_queue_cnt_removeable--;
4735			}
4736		}
4737		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4738		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4739		    SCTP_DATA_NOT_FRAG) {
4740			/* not frag'ed we ae done   */
4741			notdone = 0;
4742			foundeom = 1;
4743		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4744			/* end of frag, we are done */
4745			notdone = 0;
4746			foundeom = 1;
4747		} else {
4748			/*
4749			 * Its a begin or middle piece, we must mark all of
4750			 * it
4751			 */
4752			notdone = 1;
4753			tp1 = TAILQ_NEXT(tp1, sctp_next);
4754		}
4755	} while (tp1 && notdone);
4756	if (foundeom == 0) {
4757		/*
4758		 * The multi-part message was scattered across the send and
4759		 * sent queue.
4760		 */
4761		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4762			if ((tp1->rec.data.stream_number != stream) ||
4763			    (tp1->rec.data.stream_seq != seq)) {
4764				break;
4765			}
4766			/*
4767			 * save to chk in case we have some on stream out
4768			 * queue. If so and we have an un-transmitted one we
4769			 * don't have to fudge the TSN.
4770			 */
4771			chk = tp1;
4772			ret_sz += tp1->book_size;
4773			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4774			if (sent) {
4775				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4776			} else {
4777				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4778			}
4779			if (tp1->data) {
4780				sctp_m_freem(tp1->data);
4781				tp1->data = NULL;
4782			}
4783			/* No flight involved here book the size to 0 */
4784			tp1->book_size = 0;
4785			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4786				foundeom = 1;
4787			}
4788			do_wakeup_routine = 1;
4789			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4790			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4791			/*
4792			 * on to the sent queue so we can wait for it to be
4793			 * passed by.
4794			 */
4795			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4796			    sctp_next);
4797			stcb->asoc.send_queue_cnt--;
4798			stcb->asoc.sent_queue_cnt++;
4799		}
4800	}
4801	if (foundeom == 0) {
4802		/*
4803		 * Still no eom found. That means there is stuff left on the
4804		 * stream out queue.. yuck.
4805		 */
4806		SCTP_TCB_SEND_LOCK(stcb);
4807		strq = &stcb->asoc.strmout[stream];
4808		sp = TAILQ_FIRST(&strq->outqueue);
4809		if (sp != NULL) {
4810			sp->discard_rest = 1;
4811			/*
4812			 * We may need to put a chunk on the queue that
4813			 * holds the TSN that would have been sent with the
4814			 * LAST bit.
4815			 */
4816			if (chk == NULL) {
4817				/* Yep, we have to */
4818				sctp_alloc_a_chunk(stcb, chk);
4819				if (chk == NULL) {
4820					/*
4821					 * we are hosed. All we can do is
4822					 * nothing.. which will cause an
4823					 * abort if the peer is paying
4824					 * attention.
4825					 */
4826					goto oh_well;
4827				}
4828				memset(chk, 0, sizeof(*chk));
4829				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4830				chk->sent = SCTP_FORWARD_TSN_SKIP;
4831				chk->asoc = &stcb->asoc;
4832				chk->rec.data.stream_seq = strq->next_sequence_send;
4833				chk->rec.data.stream_number = sp->stream;
4834				chk->rec.data.payloadtype = sp->ppid;
4835				chk->rec.data.context = sp->context;
4836				chk->flags = sp->act_flags;
4837				if (sp->net)
4838					chk->whoTo = sp->net;
4839				else
4840					chk->whoTo = stcb->asoc.primary_destination;
4841				atomic_add_int(&chk->whoTo->ref_count, 1);
4842				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4843				stcb->asoc.pr_sctp_cnt++;
4844				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4845				stcb->asoc.sent_queue_cnt++;
4846				stcb->asoc.pr_sctp_cnt++;
4847			} else {
4848				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4849			}
4850			strq->next_sequence_send++;
4851	oh_well:
4852			if (sp->data) {
4853				/*
4854				 * Pull any data to free up the SB and allow
4855				 * sender to "add more" while we will throw
4856				 * away :-)
4857				 */
4858				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4859				ret_sz += sp->length;
4860				do_wakeup_routine = 1;
4861				sp->some_taken = 1;
4862				sctp_m_freem(sp->data);
4863				sp->data = NULL;
4864				sp->tail_mbuf = NULL;
4865				sp->length = 0;
4866			}
4867		}
4868		SCTP_TCB_SEND_UNLOCK(stcb);
4869	}
4870	if (do_wakeup_routine) {
4871#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4872		struct socket *so;
4873
4874		so = SCTP_INP_SO(stcb->sctp_ep);
4875		if (!so_locked) {
4876			atomic_add_int(&stcb->asoc.refcnt, 1);
4877			SCTP_TCB_UNLOCK(stcb);
4878			SCTP_SOCKET_LOCK(so, 1);
4879			SCTP_TCB_LOCK(stcb);
4880			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4881			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4882				/* assoc was freed while we were unlocked */
4883				SCTP_SOCKET_UNLOCK(so, 1);
4884				return (ret_sz);
4885			}
4886		}
4887#endif
4888		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4889#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4890		if (!so_locked) {
4891			SCTP_SOCKET_UNLOCK(so, 1);
4892		}
4893#endif
4894	}
4895	return (ret_sz);
4896}
4897
4898/*
4899 * checks to see if the given address, sa, is one that is currently known by
4900 * the kernel note: can't distinguish the same address on multiple interfaces
4901 * and doesn't handle multiple addresses with different zone/scope id's note:
4902 * ifa_ifwithaddr() compares the entire sockaddr struct
4903 */
4904struct sctp_ifa *
4905sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4906    int holds_lock)
4907{
4908	struct sctp_laddr *laddr;
4909
4910	if (holds_lock == 0) {
4911		SCTP_INP_RLOCK(inp);
4912	}
4913	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4914		if (laddr->ifa == NULL)
4915			continue;
4916		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4917			continue;
4918#ifdef INET
4919		if (addr->sa_family == AF_INET) {
4920			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4921			    laddr->ifa->address.sin.sin_addr.s_addr) {
4922				/* found him. */
4923				if (holds_lock == 0) {
4924					SCTP_INP_RUNLOCK(inp);
4925				}
4926				return (laddr->ifa);
4927				break;
4928			}
4929		}
4930#endif
4931#ifdef INET6
4932		if (addr->sa_family == AF_INET6) {
4933			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4934			    &laddr->ifa->address.sin6)) {
4935				/* found him. */
4936				if (holds_lock == 0) {
4937					SCTP_INP_RUNLOCK(inp);
4938				}
4939				return (laddr->ifa);
4940				break;
4941			}
4942		}
4943#endif
4944	}
4945	if (holds_lock == 0) {
4946		SCTP_INP_RUNLOCK(inp);
4947	}
4948	return (NULL);
4949}
4950
/*
 * Fold a sockaddr into a 32-bit hash value for the VRF address hash
 * table: IPv4 XOR-folds the address with its upper half, IPv6 sums the
 * four 32-bit words and XOR-folds the sum.  Unsupported families hash
 * to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v4;

			v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sa6;
			uint32_t h;

			sa6 = (struct sockaddr_in6 *)addr;
			h = sa6->sin6_addr.s6_addr32[0] +
			    sa6->sin6_addr.s6_addr32[1] +
			    sa6->sin6_addr.s6_addr32[2] +
			    sa6->sin6_addr.s6_addr32[3];
			return (h ^ (h >> 16));
		}
#endif
	default:
		return (0);
	}
}
4984
/*
 * Look up addr in the address hash table of the VRF identified by
 * vrf_id and return the matching sctp_ifa, or NULL.  When holds_lock is
 * 0 the global address read lock is taken (and released) here;
 * otherwise the caller must already hold it.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
stage_right:
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	/*
	 * NOTE(review): hash_head is the address of an array slot, so this
	 * NULL check looks unreachable unless vrf_addr_hash itself can be
	 * NULL -- confirm before removing.
	 */
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (sctp_ifap == NULL) {
			/* defensive check for a corrupted bucket list */
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;	/* NOTE(review): unreachable after return */
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;	/* NOTE(review): unreachable after return */
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}
5058
/*
 * Called after the user has pulled data off the socket, to decide whether
 * the peer should be told about the newly opened receive window.
 *
 * stcb         - association the data came from (may be NULL; no-op then).
 * freed_so_far - in/out: bytes freed since the last update; folded into the
 *                tcb's running count and zeroed here.
 * hold_rlock   - non-zero if the caller holds the inp read-queue lock; it
 *                is dropped around the TCB work and re-acquired on exit.
 * rwnd_req     - threshold (bytes) the window must have grown by before a
 *                window-update SACK is worth sending.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a refcount so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well while we look at its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed-byte count into the tcb's running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window grew by at least rwnd_req: send a window-update
		 * SACK.  Drop the read-queue lock first to respect lock
		 * ordering with the TCB lock.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if the caller held it on entry. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5140
5141int
5142sctp_sorecvmsg(struct socket *so,
5143    struct uio *uio,
5144    struct mbuf **mp,
5145    struct sockaddr *from,
5146    int fromlen,
5147    int *msg_flags,
5148    struct sctp_sndrcvinfo *sinfo,
5149    int filling_sinfo)
5150{
5151	/*
5152	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5153	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5154	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5155	 * On the way out we may send out any combination of:
5156	 * MSG_NOTIFICATION MSG_EOR
5157	 *
5158	 */
5159	struct sctp_inpcb *inp = NULL;
5160	int my_len = 0;
5161	int cp_len = 0, error = 0;
5162	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5163	struct mbuf *m = NULL;
5164	struct sctp_tcb *stcb = NULL;
5165	int wakeup_read_socket = 0;
5166	int freecnt_applied = 0;
5167	int out_flags = 0, in_flags = 0;
5168	int block_allowed = 1;
5169	uint32_t freed_so_far = 0;
5170	uint32_t copied_so_far = 0;
5171	int in_eeor_mode = 0;
5172	int no_rcv_needed = 0;
5173	uint32_t rwnd_req = 0;
5174	int hold_sblock = 0;
5175	int hold_rlock = 0;
5176	int slen = 0;
5177	uint32_t held_length = 0;
5178	int sockbuf_lock = 0;
5179
5180	if (uio == NULL) {
5181		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5182		return (EINVAL);
5183	}
5184	if (msg_flags) {
5185		in_flags = *msg_flags;
5186		if (in_flags & MSG_PEEK)
5187			SCTP_STAT_INCR(sctps_read_peeks);
5188	} else {
5189		in_flags = 0;
5190	}
5191	slen = uio->uio_resid;
5192
5193	/* Pull in and set up our int flags */
5194	if (in_flags & MSG_OOB) {
5195		/* Out of band's NOT supported */
5196		return (EOPNOTSUPP);
5197	}
5198	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5199		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5200		return (EINVAL);
5201	}
5202	if ((in_flags & (MSG_DONTWAIT
5203	    | MSG_NBIO
5204	    )) ||
5205	    SCTP_SO_IS_NBIO(so)) {
5206		block_allowed = 0;
5207	}
5208	/* setup the endpoint */
5209	inp = (struct sctp_inpcb *)so->so_pcb;
5210	if (inp == NULL) {
5211		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5212		return (EFAULT);
5213	}
5214	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5215	/* Must be at least a MTU's worth */
5216	if (rwnd_req < SCTP_MIN_RWND)
5217		rwnd_req = SCTP_MIN_RWND;
5218	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5219	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5220		sctp_misc_ints(SCTP_SORECV_ENTER,
5221		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5222	}
5223	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5224		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5225		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5226	}
5227	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5228	if (error) {
5229		goto release_unlocked;
5230	}
5231	sockbuf_lock = 1;
5232restart:
5233
5234
5235restart_nosblocks:
5236	if (hold_sblock == 0) {
5237		SOCKBUF_LOCK(&so->so_rcv);
5238		hold_sblock = 1;
5239	}
5240	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5241	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5242		goto out;
5243	}
5244	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5245		if (so->so_error) {
5246			error = so->so_error;
5247			if ((in_flags & MSG_PEEK) == 0)
5248				so->so_error = 0;
5249			goto out;
5250		} else {
5251			if (so->so_rcv.sb_cc == 0) {
5252				/* indicate EOF */
5253				error = 0;
5254				goto out;
5255			}
5256		}
5257	}
5258	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5259		/* we need to wait for data */
5260		if ((so->so_rcv.sb_cc == 0) &&
5261		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5262		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5263			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5264				/*
5265				 * For active open side clear flags for
5266				 * re-use passive open is blocked by
5267				 * connect.
5268				 */
5269				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5270					/*
5271					 * You were aborted, passive side
5272					 * always hits here
5273					 */
5274					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5275					error = ECONNRESET;
5276				}
5277				so->so_state &= ~(SS_ISCONNECTING |
5278				    SS_ISDISCONNECTING |
5279				    SS_ISCONFIRMING |
5280				    SS_ISCONNECTED);
5281				if (error == 0) {
5282					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5283						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5284						error = ENOTCONN;
5285					}
5286				}
5287				goto out;
5288			}
5289		}
5290		error = sbwait(&so->so_rcv);
5291		if (error) {
5292			goto out;
5293		}
5294		held_length = 0;
5295		goto restart_nosblocks;
5296	} else if (so->so_rcv.sb_cc == 0) {
5297		if (so->so_error) {
5298			error = so->so_error;
5299			if ((in_flags & MSG_PEEK) == 0)
5300				so->so_error = 0;
5301		} else {
5302			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5303			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5304				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5305					/*
5306					 * For active open side clear flags
5307					 * for re-use passive open is
5308					 * blocked by connect.
5309					 */
5310					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5311						/*
5312						 * You were aborted, passive
5313						 * side always hits here
5314						 */
5315						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5316						error = ECONNRESET;
5317					}
5318					so->so_state &= ~(SS_ISCONNECTING |
5319					    SS_ISDISCONNECTING |
5320					    SS_ISCONFIRMING |
5321					    SS_ISCONNECTED);
5322					if (error == 0) {
5323						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5324							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5325							error = ENOTCONN;
5326						}
5327					}
5328					goto out;
5329				}
5330			}
5331			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5332			error = EWOULDBLOCK;
5333		}
5334		goto out;
5335	}
5336	if (hold_sblock == 1) {
5337		SOCKBUF_UNLOCK(&so->so_rcv);
5338		hold_sblock = 0;
5339	}
5340	/* we possibly have data we can read */
5341	/* sa_ignore FREED_MEMORY */
5342	control = TAILQ_FIRST(&inp->read_queue);
5343	if (control == NULL) {
5344		/*
5345		 * This could be happening since the appender did the
5346		 * increment but as not yet did the tailq insert onto the
5347		 * read_queue
5348		 */
5349		if (hold_rlock == 0) {
5350			SCTP_INP_READ_LOCK(inp);
5351		}
5352		control = TAILQ_FIRST(&inp->read_queue);
5353		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5354#ifdef INVARIANTS
5355			panic("Huh, its non zero and nothing on control?");
5356#endif
5357			so->so_rcv.sb_cc = 0;
5358		}
5359		SCTP_INP_READ_UNLOCK(inp);
5360		hold_rlock = 0;
5361		goto restart;
5362	}
5363	if ((control->length == 0) &&
5364	    (control->do_not_ref_stcb)) {
5365		/*
5366		 * Clean up code for freeing assoc that left behind a
5367		 * pdapi.. maybe a peer in EEOR that just closed after
5368		 * sending and never indicated a EOR.
5369		 */
5370		if (hold_rlock == 0) {
5371			hold_rlock = 1;
5372			SCTP_INP_READ_LOCK(inp);
5373		}
5374		control->held_length = 0;
5375		if (control->data) {
5376			/* Hmm there is data here .. fix */
5377			struct mbuf *m_tmp;
5378			int cnt = 0;
5379
5380			m_tmp = control->data;
5381			while (m_tmp) {
5382				cnt += SCTP_BUF_LEN(m_tmp);
5383				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5384					control->tail_mbuf = m_tmp;
5385					control->end_added = 1;
5386				}
5387				m_tmp = SCTP_BUF_NEXT(m_tmp);
5388			}
5389			control->length = cnt;
5390		} else {
5391			/* remove it */
5392			TAILQ_REMOVE(&inp->read_queue, control, next);
5393			/* Add back any hiddend data */
5394			sctp_free_remote_addr(control->whoFrom);
5395			sctp_free_a_readq(stcb, control);
5396		}
5397		if (hold_rlock) {
5398			hold_rlock = 0;
5399			SCTP_INP_READ_UNLOCK(inp);
5400		}
5401		goto restart;
5402	}
5403	if ((control->length == 0) &&
5404	    (control->end_added == 1)) {
5405		/*
5406		 * Do we also need to check for (control->pdapi_aborted ==
5407		 * 1)?
5408		 */
5409		if (hold_rlock == 0) {
5410			hold_rlock = 1;
5411			SCTP_INP_READ_LOCK(inp);
5412		}
5413		TAILQ_REMOVE(&inp->read_queue, control, next);
5414		if (control->data) {
5415#ifdef INVARIANTS
5416			panic("control->data not null but control->length == 0");
5417#else
5418			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5419			sctp_m_freem(control->data);
5420			control->data = NULL;
5421#endif
5422		}
5423		if (control->aux_data) {
5424			sctp_m_free(control->aux_data);
5425			control->aux_data = NULL;
5426		}
5427		sctp_free_remote_addr(control->whoFrom);
5428		sctp_free_a_readq(stcb, control);
5429		if (hold_rlock) {
5430			hold_rlock = 0;
5431			SCTP_INP_READ_UNLOCK(inp);
5432		}
5433		goto restart;
5434	}
5435	if (control->length == 0) {
5436		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5437		    (filling_sinfo)) {
5438			/* find a more suitable one then this */
5439			ctl = TAILQ_NEXT(control, next);
5440			while (ctl) {
5441				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5442				    (ctl->some_taken ||
5443				    (ctl->spec_flags & M_NOTIFICATION) ||
5444				    ((ctl->do_not_ref_stcb == 0) &&
5445				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5446				    ) {
5447					/*-
5448					 * If we have a different TCB next, and there is data
5449					 * present. If we have already taken some (pdapi), OR we can
5450					 * ref the tcb and no delivery as started on this stream, we
5451					 * take it. Note we allow a notification on a different
5452					 * assoc to be delivered..
5453					 */
5454					control = ctl;
5455					goto found_one;
5456				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5457					    (ctl->length) &&
5458					    ((ctl->some_taken) ||
5459					    ((ctl->do_not_ref_stcb == 0) &&
5460					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5461				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5462					/*-
5463					 * If we have the same tcb, and there is data present, and we
5464					 * have the strm interleave feature present. Then if we have
5465					 * taken some (pdapi) or we can refer to tht tcb AND we have
5466					 * not started a delivery for this stream, we can take it.
5467					 * Note we do NOT allow a notificaiton on the same assoc to
5468					 * be delivered.
5469					 */
5470					control = ctl;
5471					goto found_one;
5472				}
5473				ctl = TAILQ_NEXT(ctl, next);
5474			}
5475		}
5476		/*
5477		 * if we reach here, not suitable replacement is available
5478		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5479		 * into the our held count, and its time to sleep again.
5480		 */
5481		held_length = so->so_rcv.sb_cc;
5482		control->held_length = so->so_rcv.sb_cc;
5483		goto restart;
5484	}
5485	/* Clear the held length since there is something to read */
5486	control->held_length = 0;
5487	if (hold_rlock) {
5488		SCTP_INP_READ_UNLOCK(inp);
5489		hold_rlock = 0;
5490	}
5491found_one:
5492	/*
5493	 * If we reach here, control has a some data for us to read off.
5494	 * Note that stcb COULD be NULL.
5495	 */
5496	control->some_taken++;
5497	if (hold_sblock) {
5498		SOCKBUF_UNLOCK(&so->so_rcv);
5499		hold_sblock = 0;
5500	}
5501	stcb = control->stcb;
5502	if (stcb) {
5503		if ((control->do_not_ref_stcb == 0) &&
5504		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5505			if (freecnt_applied == 0)
5506				stcb = NULL;
5507		} else if (control->do_not_ref_stcb == 0) {
5508			/* you can't free it on me please */
5509			/*
5510			 * The lock on the socket buffer protects us so the
5511			 * free code will stop. But since we used the
5512			 * socketbuf lock and the sender uses the tcb_lock
5513			 * to increment, we need to use the atomic add to
5514			 * the refcnt
5515			 */
5516			if (freecnt_applied) {
5517#ifdef INVARIANTS
5518				panic("refcnt already incremented");
5519#else
5520				SCTP_PRINTF("refcnt already incremented?\n");
5521#endif
5522			} else {
5523				atomic_add_int(&stcb->asoc.refcnt, 1);
5524				freecnt_applied = 1;
5525			}
5526			/*
5527			 * Setup to remember how much we have not yet told
5528			 * the peer our rwnd has opened up. Note we grab the
5529			 * value from the tcb from last time. Note too that
5530			 * sack sending clears this when a sack is sent,
5531			 * which is fine. Once we hit the rwnd_req, we then
5532			 * will go to the sctp_user_rcvd() that will not
5533			 * lock until it KNOWs it MUST send a WUP-SACK.
5534			 */
5535			freed_so_far = stcb->freed_by_sorcv_sincelast;
5536			stcb->freed_by_sorcv_sincelast = 0;
5537		}
5538	}
5539	if (stcb &&
5540	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5541	    control->do_not_ref_stcb == 0) {
5542		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5543	}
5544	/* First lets get off the sinfo and sockaddr info */
5545	if ((sinfo) && filling_sinfo) {
5546		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5547		nxt = TAILQ_NEXT(control, next);
5548		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5549		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5550			struct sctp_extrcvinfo *s_extra;
5551
5552			s_extra = (struct sctp_extrcvinfo *)sinfo;
5553			if ((nxt) &&
5554			    (nxt->length)) {
5555				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5556				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5557					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5558				}
5559				if (nxt->spec_flags & M_NOTIFICATION) {
5560					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5561				}
5562				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5563				s_extra->sreinfo_next_length = nxt->length;
5564				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5565				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5566				if (nxt->tail_mbuf != NULL) {
5567					if (nxt->end_added) {
5568						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5569					}
5570				}
5571			} else {
5572				/*
5573				 * we explicitly 0 this, since the memcpy
5574				 * got some other things beyond the older
5575				 * sinfo_ that is on the control's structure
5576				 * :-D
5577				 */
5578				nxt = NULL;
5579				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5580				s_extra->sreinfo_next_aid = 0;
5581				s_extra->sreinfo_next_length = 0;
5582				s_extra->sreinfo_next_ppid = 0;
5583				s_extra->sreinfo_next_stream = 0;
5584			}
5585		}
5586		/*
5587		 * update off the real current cum-ack, if we have an stcb.
5588		 */
5589		if ((control->do_not_ref_stcb == 0) && stcb)
5590			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5591		/*
5592		 * mask off the high bits, we keep the actual chunk bits in
5593		 * there.
5594		 */
5595		sinfo->sinfo_flags &= 0x00ff;
5596		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5597			sinfo->sinfo_flags |= SCTP_UNORDERED;
5598		}
5599	}
5600#ifdef SCTP_ASOCLOG_OF_TSNS
5601	{
5602		int index, newindex;
5603		struct sctp_pcbtsn_rlog *entry;
5604
5605		do {
5606			index = inp->readlog_index;
5607			newindex = index + 1;
5608			if (newindex >= SCTP_READ_LOG_SIZE) {
5609				newindex = 0;
5610			}
5611		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5612		entry = &inp->readlog[index];
5613		entry->vtag = control->sinfo_assoc_id;
5614		entry->strm = control->sinfo_stream;
5615		entry->seq = control->sinfo_ssn;
5616		entry->sz = control->length;
5617		entry->flgs = control->sinfo_flags;
5618	}
5619#endif
5620	if (fromlen && from) {
5621		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5622		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5623#ifdef INET6
5624		case AF_INET6:
5625			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5626			break;
5627#endif
5628#ifdef INET
5629		case AF_INET:
5630			((struct sockaddr_in *)from)->sin_port = control->port_from;
5631			break;
5632#endif
5633		default:
5634			break;
5635		}
5636		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5637
5638#if defined(INET) && defined(INET6)
5639		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5640		    (from->sa_family == AF_INET) &&
5641		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5642			struct sockaddr_in *sin;
5643			struct sockaddr_in6 sin6;
5644
5645			sin = (struct sockaddr_in *)from;
5646			bzero(&sin6, sizeof(sin6));
5647			sin6.sin6_family = AF_INET6;
5648			sin6.sin6_len = sizeof(struct sockaddr_in6);
5649			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5650			bcopy(&sin->sin_addr,
5651			    &sin6.sin6_addr.s6_addr32[3],
5652			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5653			sin6.sin6_port = sin->sin_port;
5654			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5655		}
5656#endif
5657#ifdef INET6
5658		{
5659			struct sockaddr_in6 lsa6, *from6;
5660
5661			from6 = (struct sockaddr_in6 *)from;
5662			sctp_recover_scope_mac(from6, (&lsa6));
5663		}
5664#endif
5665	}
5666	/* now copy out what data we can */
5667	if (mp == NULL) {
5668		/* copy out each mbuf in the chain up to length */
5669get_more_data:
5670		m = control->data;
5671		while (m) {
5672			/* Move out all we can */
5673			cp_len = (int)uio->uio_resid;
5674			my_len = (int)SCTP_BUF_LEN(m);
5675			if (cp_len > my_len) {
5676				/* not enough in this buf */
5677				cp_len = my_len;
5678			}
5679			if (hold_rlock) {
5680				SCTP_INP_READ_UNLOCK(inp);
5681				hold_rlock = 0;
5682			}
5683			if (cp_len > 0)
5684				error = uiomove(mtod(m, char *), cp_len, uio);
5685			/* re-read */
5686			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5687				goto release;
5688			}
5689			if ((control->do_not_ref_stcb == 0) && stcb &&
5690			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5691				no_rcv_needed = 1;
5692			}
5693			if (error) {
5694				/* error we are out of here */
5695				goto release;
5696			}
5697			if ((SCTP_BUF_NEXT(m) == NULL) &&
5698			    (cp_len >= SCTP_BUF_LEN(m)) &&
5699			    ((control->end_added == 0) ||
5700			    (control->end_added &&
5701			    (TAILQ_NEXT(control, next) == NULL)))
5702			    ) {
5703				SCTP_INP_READ_LOCK(inp);
5704				hold_rlock = 1;
5705			}
5706			if (cp_len == SCTP_BUF_LEN(m)) {
5707				if ((SCTP_BUF_NEXT(m) == NULL) &&
5708				    (control->end_added)) {
5709					out_flags |= MSG_EOR;
5710					if ((control->do_not_ref_stcb == 0) &&
5711					    (control->stcb != NULL) &&
5712					    ((control->spec_flags & M_NOTIFICATION) == 0))
5713						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5714				}
5715				if (control->spec_flags & M_NOTIFICATION) {
5716					out_flags |= MSG_NOTIFICATION;
5717				}
5718				/* we ate up the mbuf */
5719				if (in_flags & MSG_PEEK) {
5720					/* just looking */
5721					m = SCTP_BUF_NEXT(m);
5722					copied_so_far += cp_len;
5723				} else {
5724					/* dispose of the mbuf */
5725					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5726						sctp_sblog(&so->so_rcv,
5727						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5728					}
5729					sctp_sbfree(control, stcb, &so->so_rcv, m);
5730					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5731						sctp_sblog(&so->so_rcv,
5732						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5733					}
5734					copied_so_far += cp_len;
5735					freed_so_far += cp_len;
5736					freed_so_far += MSIZE;
5737					atomic_subtract_int(&control->length, cp_len);
5738					control->data = sctp_m_free(m);
5739					m = control->data;
5740					/*
5741					 * been through it all, must hold sb
5742					 * lock ok to null tail
5743					 */
5744					if (control->data == NULL) {
5745#ifdef INVARIANTS
5746						if ((control->end_added == 0) ||
5747						    (TAILQ_NEXT(control, next) == NULL)) {
5748							/*
5749							 * If the end is not
5750							 * added, OR the
5751							 * next is NOT null
5752							 * we MUST have the
5753							 * lock.
5754							 */
5755							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5756								panic("Hmm we don't own the lock?");
5757							}
5758						}
5759#endif
5760						control->tail_mbuf = NULL;
5761#ifdef INVARIANTS
5762						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5763							panic("end_added, nothing left and no MSG_EOR");
5764						}
5765#endif
5766					}
5767				}
5768			} else {
5769				/* Do we need to trim the mbuf? */
5770				if (control->spec_flags & M_NOTIFICATION) {
5771					out_flags |= MSG_NOTIFICATION;
5772				}
5773				if ((in_flags & MSG_PEEK) == 0) {
5774					SCTP_BUF_RESV_UF(m, cp_len);
5775					SCTP_BUF_LEN(m) -= cp_len;
5776					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5777						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5778					}
5779					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5780					if ((control->do_not_ref_stcb == 0) &&
5781					    stcb) {
5782						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5783					}
5784					copied_so_far += cp_len;
5785					freed_so_far += cp_len;
5786					freed_so_far += MSIZE;
5787					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5788						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5789						    SCTP_LOG_SBRESULT, 0);
5790					}
5791					atomic_subtract_int(&control->length, cp_len);
5792				} else {
5793					copied_so_far += cp_len;
5794				}
5795			}
5796			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5797				break;
5798			}
5799			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5800			    (control->do_not_ref_stcb == 0) &&
5801			    (freed_so_far >= rwnd_req)) {
5802				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5803			}
5804		}		/* end while(m) */
5805		/*
5806		 * At this point we have looked at it all and we either have
5807		 * a MSG_EOR/or read all the user wants... <OR>
5808		 * control->length == 0.
5809		 */
5810		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5811			/* we are done with this control */
5812			if (control->length == 0) {
5813				if (control->data) {
5814#ifdef INVARIANTS
5815					panic("control->data not null at read eor?");
5816#else
5817					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5818					sctp_m_freem(control->data);
5819					control->data = NULL;
5820#endif
5821				}
5822		done_with_control:
5823				if (TAILQ_NEXT(control, next) == NULL) {
5824					/*
5825					 * If we don't have a next we need a
5826					 * lock, if there is a next
5827					 * interrupt is filling ahead of us
5828					 * and we don't need a lock to
5829					 * remove this guy (which is the
5830					 * head of the queue).
5831					 */
5832					if (hold_rlock == 0) {
5833						SCTP_INP_READ_LOCK(inp);
5834						hold_rlock = 1;
5835					}
5836				}
5837				TAILQ_REMOVE(&inp->read_queue, control, next);
5838				/* Add back any hiddend data */
5839				if (control->held_length) {
5840					held_length = 0;
5841					control->held_length = 0;
5842					wakeup_read_socket = 1;
5843				}
5844				if (control->aux_data) {
5845					sctp_m_free(control->aux_data);
5846					control->aux_data = NULL;
5847				}
5848				no_rcv_needed = control->do_not_ref_stcb;
5849				sctp_free_remote_addr(control->whoFrom);
5850				control->data = NULL;
5851				sctp_free_a_readq(stcb, control);
5852				control = NULL;
5853				if ((freed_so_far >= rwnd_req) &&
5854				    (no_rcv_needed == 0))
5855					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5856
5857			} else {
5858				/*
5859				 * The user did not read all of this
5860				 * message, turn off the returned MSG_EOR
5861				 * since we are leaving more behind on the
5862				 * control to read.
5863				 */
5864#ifdef INVARIANTS
5865				if (control->end_added &&
5866				    (control->data == NULL) &&
5867				    (control->tail_mbuf == NULL)) {
5868					panic("Gak, control->length is corrupt?");
5869				}
5870#endif
5871				no_rcv_needed = control->do_not_ref_stcb;
5872				out_flags &= ~MSG_EOR;
5873			}
5874		}
5875		if (out_flags & MSG_EOR) {
5876			goto release;
5877		}
5878		if ((uio->uio_resid == 0) ||
5879		    ((in_eeor_mode) &&
5880		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5881			goto release;
5882		}
5883		/*
5884		 * If I hit here the receiver wants more and this message is
5885		 * NOT done (pd-api). So two questions. Can we block? if not
5886		 * we are done. Did the user NOT set MSG_WAITALL?
5887		 */
5888		if (block_allowed == 0) {
5889			goto release;
5890		}
5891		/*
5892		 * We need to wait for more data a few things: - We don't
5893		 * sbunlock() so we don't get someone else reading. - We
5894		 * must be sure to account for the case where what is added
5895		 * is NOT to our control when we wakeup.
5896		 */
5897
5898		/*
5899		 * Do we need to tell the transport a rwnd update might be
5900		 * needed before we go to sleep?
5901		 */
5902		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5903		    ((freed_so_far >= rwnd_req) &&
5904		    (control->do_not_ref_stcb == 0) &&
5905		    (no_rcv_needed == 0))) {
5906			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5907		}
5908wait_some_more:
5909		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5910			goto release;
5911		}
5912		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5913			goto release;
5914
5915		if (hold_rlock == 1) {
5916			SCTP_INP_READ_UNLOCK(inp);
5917			hold_rlock = 0;
5918		}
5919		if (hold_sblock == 0) {
5920			SOCKBUF_LOCK(&so->so_rcv);
5921			hold_sblock = 1;
5922		}
5923		if ((copied_so_far) && (control->length == 0) &&
5924		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5925			goto release;
5926		}
5927		if (so->so_rcv.sb_cc <= control->held_length) {
5928			error = sbwait(&so->so_rcv);
5929			if (error) {
5930				goto release;
5931			}
5932			control->held_length = 0;
5933		}
5934		if (hold_sblock) {
5935			SOCKBUF_UNLOCK(&so->so_rcv);
5936			hold_sblock = 0;
5937		}
5938		if (control->length == 0) {
5939			/* still nothing here */
5940			if (control->end_added == 1) {
5941				/* he aborted, or is done i.e.did a shutdown */
5942				out_flags |= MSG_EOR;
5943				if (control->pdapi_aborted) {
5944					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5945						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5946
5947					out_flags |= MSG_TRUNC;
5948				} else {
5949					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5950						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5951				}
5952				goto done_with_control;
5953			}
5954			if (so->so_rcv.sb_cc > held_length) {
5955				control->held_length = so->so_rcv.sb_cc;
5956				held_length = 0;
5957			}
5958			goto wait_some_more;
5959		} else if (control->data == NULL) {
5960			/*
5961			 * we must re-sync since data is probably being
5962			 * added
5963			 */
5964			SCTP_INP_READ_LOCK(inp);
5965			if ((control->length > 0) && (control->data == NULL)) {
5966				/*
5967				 * big trouble.. we have the lock and its
5968				 * corrupt?
5969				 */
5970#ifdef INVARIANTS
5971				panic("Impossible data==NULL length !=0");
5972#endif
5973				out_flags |= MSG_EOR;
5974				out_flags |= MSG_TRUNC;
5975				control->length = 0;
5976				SCTP_INP_READ_UNLOCK(inp);
5977				goto done_with_control;
5978			}
5979			SCTP_INP_READ_UNLOCK(inp);
5980			/* We will fall around to get more data */
5981		}
5982		goto get_more_data;
5983	} else {
5984		/*-
5985		 * Give caller back the mbuf chain,
5986		 * store in uio_resid the length
5987		 */
5988		wakeup_read_socket = 0;
5989		if ((control->end_added == 0) ||
5990		    (TAILQ_NEXT(control, next) == NULL)) {
5991			/* Need to get rlock */
5992			if (hold_rlock == 0) {
5993				SCTP_INP_READ_LOCK(inp);
5994				hold_rlock = 1;
5995			}
5996		}
5997		if (control->end_added) {
5998			out_flags |= MSG_EOR;
5999			if ((control->do_not_ref_stcb == 0) &&
6000			    (control->stcb != NULL) &&
6001			    ((control->spec_flags & M_NOTIFICATION) == 0))
6002				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6003		}
6004		if (control->spec_flags & M_NOTIFICATION) {
6005			out_flags |= MSG_NOTIFICATION;
6006		}
6007		uio->uio_resid = control->length;
6008		*mp = control->data;
6009		m = control->data;
6010		while (m) {
6011			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6012				sctp_sblog(&so->so_rcv,
6013				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6014			}
6015			sctp_sbfree(control, stcb, &so->so_rcv, m);
6016			freed_so_far += SCTP_BUF_LEN(m);
6017			freed_so_far += MSIZE;
6018			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6019				sctp_sblog(&so->so_rcv,
6020				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6021			}
6022			m = SCTP_BUF_NEXT(m);
6023		}
6024		control->data = control->tail_mbuf = NULL;
6025		control->length = 0;
6026		if (out_flags & MSG_EOR) {
6027			/* Done with this control */
6028			goto done_with_control;
6029		}
6030	}
6031release:
6032	if (hold_rlock == 1) {
6033		SCTP_INP_READ_UNLOCK(inp);
6034		hold_rlock = 0;
6035	}
6036	if (hold_sblock == 1) {
6037		SOCKBUF_UNLOCK(&so->so_rcv);
6038		hold_sblock = 0;
6039	}
6040	sbunlock(&so->so_rcv);
6041	sockbuf_lock = 0;
6042
6043release_unlocked:
6044	if (hold_sblock) {
6045		SOCKBUF_UNLOCK(&so->so_rcv);
6046		hold_sblock = 0;
6047	}
6048	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6049		if ((freed_so_far >= rwnd_req) &&
6050		    (control && (control->do_not_ref_stcb == 0)) &&
6051		    (no_rcv_needed == 0))
6052			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6053	}
6054out:
6055	if (msg_flags) {
6056		*msg_flags = out_flags;
6057	}
6058	if (((out_flags & MSG_EOR) == 0) &&
6059	    ((in_flags & MSG_PEEK) == 0) &&
6060	    (sinfo) &&
6061	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6062	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6063		struct sctp_extrcvinfo *s_extra;
6064
6065		s_extra = (struct sctp_extrcvinfo *)sinfo;
6066		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6067	}
6068	if (hold_rlock == 1) {
6069		SCTP_INP_READ_UNLOCK(inp);
6070	}
6071	if (hold_sblock) {
6072		SOCKBUF_UNLOCK(&so->so_rcv);
6073	}
6074	if (sockbuf_lock) {
6075		sbunlock(&so->so_rcv);
6076	}
6077	if (freecnt_applied) {
6078		/*
6079		 * The lock on the socket buffer protects us so the free
6080		 * code will stop. But since we used the socketbuf lock and
6081		 * the sender uses the tcb_lock to increment, we need to use
6082		 * the atomic add to the refcnt.
6083		 */
6084		if (stcb == NULL) {
6085#ifdef INVARIANTS
6086			panic("stcb for refcnt has gone NULL?");
6087			goto stage_left;
6088#else
6089			goto stage_left;
6090#endif
6091		}
6092		atomic_add_int(&stcb->asoc.refcnt, -1);
6093		/* Save the value back for next time */
6094		stcb->freed_by_sorcv_sincelast = freed_so_far;
6095	}
6096	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6097		if (stcb) {
6098			sctp_misc_ints(SCTP_SORECV_DONE,
6099			    freed_so_far,
6100			    ((uio) ? (slen - uio->uio_resid) : slen),
6101			    stcb->asoc.my_rwnd,
6102			    so->so_rcv.sb_cc);
6103		} else {
6104			sctp_misc_ints(SCTP_SORECV_DONE,
6105			    freed_so_far,
6106			    ((uio) ? (slen - uio->uio_resid) : slen),
6107			    0,
6108			    so->so_rcv.sb_cc);
6109		}
6110	}
6111stage_left:
6112	if (wakeup_read_socket) {
6113		sctp_sorwakeup(inp, so);
6114	}
6115	return (error);
6116}
6117
6118
6119#ifdef SCTP_MBUF_LOGGING
6120struct mbuf *
6121sctp_m_free(struct mbuf *m)
6122{
6123	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6124		if (SCTP_BUF_IS_EXTENDED(m)) {
6125			sctp_log_mb(m, SCTP_MBUF_IFREE);
6126		}
6127	}
6128	return (m_free(m));
6129}
6130
6131void
6132sctp_m_freem(struct mbuf *mb)
6133{
6134	while (mb != NULL)
6135		mb = sctp_m_free(mb);
6136}
6137
6138#endif
6139
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * local interface address, or ENOMEM if no work item could be
	 * allocated.  The actual per-association work is done
	 * asynchronously by the address work-queue timer.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the item gets processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6186
6187
/*
 * SCTP's pr_soreceive entry point.  Wraps sctp_sorecvmsg() and converts
 * the received sctp_extrcvinfo into a control-message chain (controlp)
 * and the peer address into a malloc'd sockaddr (*psa) as the generic
 * socket layer expects.  Returns 0 or an errno value.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	/*
	 * Only fill in sinfo when the user asked for receive info events
	 * and supplied a place (controlp) to put the resulting cmsg.
	 */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		/* Use the on-stack buffer to collect the peer address. */
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo)
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		else
			*controlp = NULL;
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			/* NOTE: M_NOWAIT may fail, leaving *psa NULL. */
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}
6248
6249
6250
6251
6252
/*
 * Add up to totaddr packed addresses from 'addr' to an association being
 * set up by sctp_connectx().  Returns the number of addresses added and
 * sets *error to 0, EINVAL (bad address), or ENOBUFS (add failed).
 *
 * NOTE: on any error path the association has already been freed via
 * sctp_free_assoc(); the caller must not touch stcb afterwards.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * Unknown family: skip it (advances by the last
			 * seen incr; 0 on the very first address).
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6328
6329struct sctp_tcb *
6330sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6331    int *totaddr, int *num_v4, int *num_v6, int *error,
6332    int limit, int *bad_addr)
6333{
6334	struct sockaddr *sa;
6335	struct sctp_tcb *stcb = NULL;
6336	size_t incr, at, i;
6337
6338	at = incr = 0;
6339	sa = addr;
6340
6341	*error = *num_v6 = *num_v4 = 0;
6342	/* account and validate addresses */
6343	for (i = 0; i < (size_t)*totaddr; i++) {
6344		switch (sa->sa_family) {
6345#ifdef INET
6346		case AF_INET:
6347			(*num_v4) += 1;
6348			incr = sizeof(struct sockaddr_in);
6349			if (sa->sa_len != incr) {
6350				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6351				*error = EINVAL;
6352				*bad_addr = 1;
6353				return (NULL);
6354			}
6355			break;
6356#endif
6357#ifdef INET6
6358		case AF_INET6:
6359			{
6360				struct sockaddr_in6 *sin6;
6361
6362				sin6 = (struct sockaddr_in6 *)sa;
6363				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6364					/* Must be non-mapped for connectx */
6365					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6366					*error = EINVAL;
6367					*bad_addr = 1;
6368					return (NULL);
6369				}
6370				(*num_v6) += 1;
6371				incr = sizeof(struct sockaddr_in6);
6372				if (sa->sa_len != incr) {
6373					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6374					*error = EINVAL;
6375					*bad_addr = 1;
6376					return (NULL);
6377				}
6378				break;
6379			}
6380#endif
6381		default:
6382			*totaddr = i;
6383			/* we are done */
6384			break;
6385		}
6386		if (i == (size_t)*totaddr) {
6387			break;
6388		}
6389		SCTP_INP_INCR_REF(inp);
6390		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6391		if (stcb != NULL) {
6392			/* Already have or am bring up an association */
6393			return (stcb);
6394		} else {
6395			SCTP_INP_DECR_REF(inp);
6396		}
6397		if ((at + incr) > (size_t)limit) {
6398			*totaddr = i;
6399			break;
6400		}
6401		sa = (struct sockaddr *)((caddr_t)sa + incr);
6402	}
6403	return ((struct sctp_tcb *)NULL);
6404}
6405
6406/*
6407 * sctp_bindx(ADD) for one address.
6408 * assumes all arguments are valid/checked by caller.
6409 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* Validate the caller-supplied sockaddr length. */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped address to a plain IPv4
			 * sockaddr and use that from here on.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this becomes the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Check whether another endpoint already owns this addr:port. */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Free: strip the port and add the raw address. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6533
6534/*
6535 * sctp_bindx(DELETE) for one address.
6536 * assumes all arguments are valid/checked by caller.
6537 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* Validate the caller-supplied sockaddr length. */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped address to a plain IPv4
			 * sockaddr and use that from here on.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6618
6619/*
6620 * returns the valid local address count for an assoc, taking into account
6621 * all scoping rules
6622 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses outside our jail. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses outside our jail. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6768
6769#if defined(SCTP_LOCAL_TRACE_BUF)
6770
/*
 * Record one entry in the global SCTP trace ring buffer.  The slot index
 * is claimed lock-free with a CAS loop; entries are written without
 * further synchronization, so a concurrent reader may observe a slot
 * mid-update (acceptable for a debug trace).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Atomically advance the ring index, wrapping to 1 at the end. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* A wrapped index writes into slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6796
6797#endif
/*
 * Tunneling callback: receive a UDP-encapsulated SCTP packet, strip the
 * UDP header, and hand the reassembled packet to the normal SCTP input
 * path with the UDP source port recorded.  Consumes 'm' on all paths.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	/* Kept in network byte order, as sctp_input_with_port() expects. */
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed (m_pullup freed sp itself) */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6869
/*
 * Close and clear the kernel UDP tunneling sockets created by
 * sctp_over_udp_start().  Safe to call when tunneling is not running.
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6890
/*
 * Create kernel UDP sockets (IPv4 and/or IPv6) bound to the configured
 * tunneling port and register sctp_recv_udp_tunneled_packet() as the
 * tunneling input hook.  Returns 0 on success or an errno; on any
 * failure, sctp_over_udp_stop() is called so no partial state remains.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
6976